diff --git a/go.mod b/go.mod
index 9dea298d4..177c5f1ea 100644
--- a/go.mod
+++ b/go.mod
@@ -3,35 +3,34 @@ module github.com/containers/common
go 1.15

require (
- github.com/BurntSushi/toml v0.3.1
- github.com/containers/image/v5 v5.12.0
- github.com/containers/ocicrypt v1.1.1
- github.com/containers/storage v1.32.1
+ github.com/BurntSushi/toml v1.4.0
+ github.com/containers/image/v5 v5.33.0
+ github.com/containers/ocicrypt v1.2.0
+ github.com/containers/storage v1.56.0
github.com/disiqueira/gotree/v3 v3.0.2
- github.com/docker/distribution v2.7.1+incompatible
- github.com/docker/docker v20.10.7+incompatible
- github.com/docker/go-units v0.4.0
+ github.com/docker/distribution v2.8.3+incompatible
+ github.com/docker/docker v27.3.1+incompatible
+ github.com/docker/go-units v0.5.0
github.com/ghodss/yaml v1.0.0
- github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/go-multierror v1.1.1
github.com/jinzhu/copier v0.3.2
- github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
- github.com/onsi/ginkgo v1.16.4
- github.com/onsi/gomega v1.13.0
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/gomega v1.31.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
- github.com/opencontainers/runc v1.0.0-rc95
- github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
- github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.8.1
+ github.com/opencontainers/image-spec v1.1.0
+ github.com/opencontainers/runc v1.1.14
+ github.com/opencontainers/runtime-spec v1.2.0
+ github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626
+ github.com/opencontainers/selinux v1.11.1
github.com/pkg/errors v0.9.1
- github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
- github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v1.1.3
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
+ github.com/sirupsen/logrus v1.9.3
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.9.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
- golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
- golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
+ github.com/tchap/go-patricia v2.3.0+incompatible // indirect
+ golang.org/x/sync v0.8.0
+ golang.org/x/sys v0.26.0
+ golang.org/x/term v0.25.0
)
diff --git a/go.sum b/go.sum
index 039b219f0..6b81e0582 100644
--- a/go.sum
+++ b/go.sum
@@ -1,46 +1,2267 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
+bitbucket.org/creachadair/shell v0.0.7/go.mod h1:oqtXSSvSYr4624lnnabXHaBsYW6RD80caLi2b3hJk0U=
+bitbucket.org/creachadair/shell v0.0.8/go.mod h1:vINzudofoUXZSJ5tREgpy+Etyjsag3ait5WOWImEVZ0=
+cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+chainguard.dev/go-grpc-kit v0.17.5/go.mod h1:vQGcwZiX6jXwhyLPCZwVMvjITD+XcrSmQzuCTW/XcVc=
+chainguard.dev/go-oidctest v0.3.1/go.mod h1:TDN6MPJ6BEzWtorS9/dHzzGiaBpPRnRONv2SE0mqitU=
+chainguard.dev/sdk v0.1.23/go.mod h1:TojPLtaHmy1TThjHjvPQbwKRDCusBPpQWSBZ2EkQFFk=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.92.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
+cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
+cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM=
+cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
+cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
+cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
+cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms=
+cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8=
+cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
+cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
+cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0=
+cloud.google.com/go/accessapproval v1.7.3/go.mod h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8=
+cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc=
+cloud.google.com/go/accessapproval v1.7.5/go.mod h1:g88i1ok5dvQ9XJsxpUInWWvUBrIZhyPDPbk4T01OoJ0=
+cloud.google.com/go/accessapproval v1.7.6/go.mod h1:bdDCS3iLSLhlK3pu8lJClaeIVghSpTLGChl1Ihr9Fsc=
+cloud.google.com/go/accessapproval v1.7.7/go.mod h1:10ZDPYiTm8tgxuMPid8s2DL93BfCt6xBh/Vg0Xd8pU0=
+cloud.google.com/go/accessapproval v1.7.9/go.mod h1:teNI+P/xzZ3dppGXEYFvSmuOvmTjLE9toPq21WHssYc=
+cloud.google.com/go/accessapproval v1.7.10/go.mod h1:iOXZj2B/c3N8nf2PYOB3iuRKCbnkn19/F6fqaa2zhn8=
+cloud.google.com/go/accessapproval v1.7.11/go.mod h1:KGK3+CLDWm4BvjN0wFtZqdFUGhxlTvTF6PhAwQJGL4M=
+cloud.google.com/go/accessapproval v1.7.12/go.mod h1:wvyt8Okohbq1i8/aPbCMBNwGQFZaNli5d+1qa/5zgGo=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps=
+cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo=
+cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk=
+cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ=
+cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M=
+cloud.google.com/go/accesscontextmanager v1.8.5/go.mod h1:TInEhcZ7V9jptGNqN3EzZ5XMhT6ijWxTGjzyETwmL0Q=
+cloud.google.com/go/accesscontextmanager v1.8.6/go.mod h1:rMC0Z8pCe/JR6yQSksprDc6swNKjMEvkfCbaesh+OS0=
+cloud.google.com/go/accesscontextmanager v1.8.7/go.mod h1:jSvChL1NBQ+uLY9zUBdPy9VIlozPoHptdBnRYeWuQoM=
+cloud.google.com/go/accesscontextmanager v1.8.9/go.mod h1:IXvQesVgOC7aXgK9OpYFn5eWnzz8fazegIiJ5WnCOVw=
+cloud.google.com/go/accesscontextmanager v1.8.10/go.mod h1:hdwcvyIn3NXgjSiUanbL7drFlOl39rAoj5SKBrNVtyA=
+cloud.google.com/go/accesscontextmanager v1.8.11/go.mod h1:nwPysISS3KR5qXipAU6cW/UbDavDdTBBgPohbkhGSok=
+cloud.google.com/go/accesscontextmanager v1.8.12/go.mod h1:EmaVYmffq+2jA2waP0/XHECDkaOKVztxVsdzl65t8hw=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4=
+cloud.google.com/go/aiplatform v1.51.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4=
+cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo=
+cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw=
+cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
+cloud.google.com/go/aiplatform v1.54.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
+cloud.google.com/go/aiplatform v1.57.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
+cloud.google.com/go/aiplatform v1.58.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
+cloud.google.com/go/aiplatform v1.58.2/go.mod h1:c3kCiVmb6UC1dHAjZjcpDj6ZS0bHQ2slL88ZjC2LtlA=
+cloud.google.com/go/aiplatform v1.60.0/go.mod h1:eTlGuHOahHprZw3Hio5VKmtThIOak5/qy6pzdsqcQnM=
+cloud.google.com/go/aiplatform v1.62.2/go.mod h1:ViLUVST6/gJAR80fyZmFSOn77rPHDkXqZDMDr4Qb8OM=
+cloud.google.com/go/aiplatform v1.66.0/go.mod h1:bPQS0UjaXaTAq57UgP3XWDCtYFOIbXXpkMsl6uP4JAc=
+cloud.google.com/go/aiplatform v1.67.0/go.mod h1:s/sJ6btBEr6bKnrNWdK9ZgHCvwbZNdP90b3DDtxxw+Y=
+cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA=
+cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8=
+cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w=
+cloud.google.com/go/analytics v0.22.0/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w=
+cloud.google.com/go/analytics v0.23.0/go.mod h1:YPd7Bvik3WS95KBok2gPXDqQPHy08TsCQG6CdUCb+u0=
+cloud.google.com/go/analytics v0.23.1/go.mod h1:N+piBUJo0RfnVTa/u8E/d31jAxxQaHlnoJfUx0dechM=
+cloud.google.com/go/analytics v0.23.2/go.mod h1:vtE3olAXZ6edJYk1UOndEs6EfaEc9T2B28Y4G5/a7Fo=
+cloud.google.com/go/analytics v0.23.4/go.mod h1:1iTnQMOr6zRdkecW+gkxJpwV0Q/djEIII3YlXmyf7UY=
+cloud.google.com/go/analytics v0.23.5/go.mod h1:J54PE6xjbmbTA5mOOfX5ibafOs9jyY7sFKTTiAnIIY4=
+cloud.google.com/go/analytics v0.23.6/go.mod h1:cFz5GwWHrWQi8OHKP9ep3Z4pvHgGcG9lPnFQ+8kXsNo=
+cloud.google.com/go/analytics v0.24.0/go.mod h1:NpavJSb6TSO56hGpX1+4JL7js6AkKl27TEqzW9Sn7E4=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA=
+cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8=
+cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y=
+cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY=
+cloud.google.com/go/apigateway v1.6.5/go.mod h1:6wCwvYRckRQogyDDltpANi3zsCDl6kWi0b4Je+w2UiI=
+cloud.google.com/go/apigateway v1.6.6/go.mod h1:bFH3EwOkeEC+31wVxKNuiadhk2xa7y9gJ3rK4Mctq6o=
+cloud.google.com/go/apigateway v1.6.7/go.mod h1:7wAMb/33Rzln+PrGK16GbGOfA1zAO5Pq6wp19jtIt7c=
+cloud.google.com/go/apigateway v1.6.9/go.mod h1:YE9XDTFwq859O6TpZNtatBMDWnMRZOiTVF+Ru3oCBeY=
+cloud.google.com/go/apigateway v1.6.10/go.mod h1:3bRZnd+TDYONxRw2W8LB1jG3pDONS7GHJXMm5+BtQ+k=
+cloud.google.com/go/apigateway v1.6.11/go.mod h1:4KsrYHn/kSWx8SNUgizvaz+lBZ4uZfU7mUDsGhmkWfM=
+cloud.google.com/go/apigateway v1.6.12/go.mod h1:2RX6Op78cxqMtENfJW8kKpwtBCFVJGyvBtSR9l6v7aM=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs=
+cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18=
+cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo=
+cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0=
+cloud.google.com/go/apigeeconnect v1.6.5/go.mod h1:MEKm3AiT7s11PqTfKE3KZluZA9O91FNysvd3E6SJ6Ow=
+cloud.google.com/go/apigeeconnect v1.6.6/go.mod h1:j8V/Xj51tEUl/cWnqwlolPvCpHj5OvgKrHEGfmYXG9Y=
+cloud.google.com/go/apigeeconnect v1.6.7/go.mod h1:hZxCKvAvDdKX8+eT0g5eEAbRSS9Gkzi+MPWbgAMAy5U=
+cloud.google.com/go/apigeeconnect v1.6.9/go.mod h1:tl53uGgVG1A00qK1dF6wGIji0CQIMrLdNccJ6+R221U=
+cloud.google.com/go/apigeeconnect v1.6.10/go.mod h1:MZf8FZK+0JZBcncSSnUkzWw2n2fQnEdIvfI6J7hGcEY=
+cloud.google.com/go/apigeeconnect v1.6.11/go.mod h1:iMQLTeKxtKL+sb0D+pFlS/TO6za2IUOh/cwMEtn/4g0=
+cloud.google.com/go/apigeeconnect v1.6.12/go.mod h1:/DSr1IlfzrXeKjS6c3+8P04avr+4U5S7J3F69SNGFkY=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw=
+cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8=
+cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw=
+cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8=
+cloud.google.com/go/apigeeregistry v0.8.3/go.mod h1:aInOWnqF4yMQx8kTjDqHNXjZGh/mxeNlAf52YqtASUs=
+cloud.google.com/go/apigeeregistry v0.8.4/go.mod h1:oA6iN7olOol8Rc28n1qd2q0LSD3ro2pdf/1l/y8SK4E=
+cloud.google.com/go/apigeeregistry v0.8.5/go.mod h1:ZMg60hq2K35tlqZ1VVywb9yjFzk9AJ7zqxrysOxLi3o=
+cloud.google.com/go/apigeeregistry v0.8.7/go.mod h1:Jge1HQaIkNU8JYSDY7l5SveeSKvGPvtLjzNjLU2+0N8=
+cloud.google.com/go/apigeeregistry v0.8.8/go.mod h1:0pDUUsNGiqCuBlD0VoPX2ssug6/vJ6BBPg8o4qPkE4k=
+cloud.google.com/go/apigeeregistry v0.8.9/go.mod h1:4XivwtSdfSO16XZdMEQDBCMCWDp3jkCBRhVgamQfLSA=
+cloud.google.com/go/apigeeregistry v0.8.10/go.mod h1:3uJa4XfNqvhIvKksKEE7UahxZY1/2Uj07cCfT/RJZZM=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY=
+cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo=
+cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk=
+cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg=
+cloud.google.com/go/appengine v1.8.5/go.mod h1:uHBgNoGLTS5di7BvU25NFDuKa82v0qQLjyMJLuPQrVo=
+cloud.google.com/go/appengine v1.8.6/go.mod h1:J0Vk696gUey9gbmTub3Qe4NYPy6qulXMkfwcQjadFnM=
+cloud.google.com/go/appengine v1.8.7/go.mod h1:1Fwg2+QTgkmN6Y+ALGwV8INLbdkI7+vIvhcKPZCML0g=
+cloud.google.com/go/appengine v1.8.9/go.mod h1:sw8T321TAto/u6tMinv3AV63olGH/hw7RhG4ZgNhqFs=
+cloud.google.com/go/appengine v1.8.10/go.mod h1:4jh9kPp01PeN//i+yEHjIQ5153f/F9q/CDbNTMYBlU4=
+cloud.google.com/go/appengine v1.8.11/go.mod h1:xET3coaDUj+OP4TgnZlgQ+rG2R9fG2nblya13czP56Q=
+cloud.google.com/go/appengine v1.8.12/go.mod h1:31Ib+S1sYnRQmCtfGqEf6EfzsiYy98EuDtLlvmpmx6U=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg=
+cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4=
+cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0=
+cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M=
+cloud.google.com/go/area120 v0.8.5/go.mod h1:BcoFCbDLZjsfe4EkCnEq1LKvHSK0Ew/zk5UFu6GMyA0=
+cloud.google.com/go/area120 v0.8.6/go.mod h1:sjEk+S9QiyDt1fxo75TVut560XZLnuD9lMtps0qQSH0=
+cloud.google.com/go/area120 v0.8.7/go.mod h1:L/xTq4NLP9mmxiGdcsVz7y1JLc9DI8pfaXRXbnjkR6w=
+cloud.google.com/go/area120 v0.8.9/go.mod h1:epLvbmajRp919r1LGdvS1zgcHJt/1MTQJJ9+r0/NBQc=
+cloud.google.com/go/area120 v0.8.10/go.mod h1:vTEko4eg1VkkkEzWDjLtMwBHgm7L4x8HgWE8fgEUd5k=
+cloud.google.com/go/area120 v0.8.11/go.mod h1:VBxJejRAJqeuzXQBbh5iHBYUkIjZk5UzFZLCXmzap2o=
+cloud.google.com/go/area120 v0.8.12/go.mod h1:W94qTbrwhzGimOeoClrGdm5DAkMGlg/V6Maldra5QM8=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E=
+cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U=
+cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI=
+cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU=
+cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE=
+cloud.google.com/go/artifactregistry v1.14.7/go.mod h1:0AUKhzWQzfmeTvT4SjfI4zjot72EMfrkvL9g9aRjnnM=
+cloud.google.com/go/artifactregistry v1.14.8/go.mod h1:1UlSXh6sTXYrIT4kMO21AE1IDlMFemlZuX6QS+JXW7I=
+cloud.google.com/go/artifactregistry v1.14.9/go.mod h1:n2OsUqbYoUI2KxpzQZumm6TtBgtRf++QulEohdnlsvI=
+cloud.google.com/go/artifactregistry v1.14.11/go.mod h1:ahyKXer42EOIddYzk2zYfvZnByGPdAYhXqBbRBsGizE=
+cloud.google.com/go/artifactregistry v1.14.12/go.mod h1:00qcBxCdu0SKIYPhFOymrsJpdacjBHVSiCsRkyqlRUA=
+cloud.google.com/go/artifactregistry v1.14.13/go.mod h1:zQ/T4xoAFPtcxshl+Q4TJBgsy7APYR/BLd2z3xEAqRA=
+cloud.google.com/go/artifactregistry v1.14.14/go.mod h1:lPHksFcKpcZRrhGNx87a6SSygv0hfWi6Cd0gnWIUU4U=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ=
+cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg=
+cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4=
+cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs=
+cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU=
+cloud.google.com/go/asset v1.16.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU=
+cloud.google.com/go/asset v1.17.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU=
+cloud.google.com/go/asset v1.17.1/go.mod h1:byvDw36UME5AzGNK7o4JnOnINkwOZ1yRrGrKIahHrng=
+cloud.google.com/go/asset v1.17.2/go.mod h1:SVbzde67ehddSoKf5uebOD1sYw8Ab/jD/9EIeWg99q4=
+cloud.google.com/go/asset v1.18.0/go.mod h1:JnuX7WLUc55AFKJOqF0n7gBPZostxZQCHPEu2WQ1980=
+cloud.google.com/go/asset v1.18.1/go.mod h1:QXivw0mVqwrhZyuX6iqFbyfCdzYE9AFCJVG47Eh5dMM=
+cloud.google.com/go/asset v1.19.1/go.mod h1:kGOS8DiCXv6wU/JWmHWCgaErtSZ6uN5noCy0YwVaGfs=
+cloud.google.com/go/asset v1.19.3/go.mod h1:1j8NNcHsbSE/KeHMZrizPIS6c8nm0WjEAPoFXzXNCj4=
+cloud.google.com/go/asset v1.19.4/go.mod h1:zSEhgb9eNLeBcl4eSO/nsrh1MyUNCBynvyRaFnXMaeY=
+cloud.google.com/go/asset v1.19.5/go.mod h1:sqyLOYaLLfc4ACcn3YxqHno+J7lRt9NJTdO50zCUcY0=
+cloud.google.com/go/asset v1.19.6/go.mod h1:UsijVGuWC6uml/+ODlL+mv6e3dZ52fbdOfOkiv4f0cE=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
+cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4=
+cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs=
+cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U=
+cloud.google.com/go/assuredworkloads v1.11.5/go.mod h1:FKJ3g3ZvkL2D7qtqIGnDufFkHxwIpNM9vtmhvt+6wqk=
+cloud.google.com/go/assuredworkloads v1.11.6/go.mod h1:1dlhWKocQorGYkspt+scx11kQCI9qVHOi1Au6Rw9srg=
+cloud.google.com/go/assuredworkloads v1.11.7/go.mod h1:CqXcRH9N0KCDtHhFisv7kk+cl//lyV+pYXGi1h8rCEU=
+cloud.google.com/go/assuredworkloads v1.11.9/go.mod h1:uZ6+WHiT4iGn1iM1wk5njKnKJWiM3v/aYhDoCoHxs1w=
+cloud.google.com/go/assuredworkloads v1.11.10/go.mod h1:x6pCPBbTVjXbAWu35spKLY3AU4Pmcn4GeXnkZGxOVhU=
+cloud.google.com/go/assuredworkloads v1.11.11/go.mod h1:vaYs6+MHqJvLKYgZBOsuuOhBgNNIguhRU0Kt7JTGcnI=
+cloud.google.com/go/assuredworkloads v1.11.12/go.mod h1:yYnk9icCH5XEkqjJinBNBDv5mSvi1FYhpA9Q+BpTwew=
+cloud.google.com/go/auth v0.2.1/go.mod h1:khQRBNrvNoHiHhV1iu2x8fSnlNbCaVHilznW5MAI5GY=
+cloud.google.com/go/auth v0.2.2/go.mod h1:2bDNJWtWziDT3Pu1URxHHbkHE/BbOCuyUiKIGcNvafo=
+cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
+cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro=
+cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g=
+cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4=
+cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
+cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
+cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
+cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc=
+cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM=
+cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk=
+cloud.google.com/go/auth/oauth2adapt v0.2.1/go.mod h1:tOdK/k+D2e4GEwfBRA48dKNQiDsqIXxLh7VU319eV0g=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE=
+cloud.google.com/go/automl v1.13.2/go.mod h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg=
+cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY=
+cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8=
+cloud.google.com/go/automl v1.13.5/go.mod h1:MDw3vLem3yh+SvmSgeYUmUKqyls6NzSumDm9OJ3xJ1Y=
+cloud.google.com/go/automl v1.13.6/go.mod h1:/0VtkKis6KhFJuPzi45e0E+e9AdQE09SNieChjJqU18=
+cloud.google.com/go/automl v1.13.7/go.mod h1:E+s0VOsYXUdXpq0y4gNZpi0A/s6y9+lAarmV5Eqlg40=
+cloud.google.com/go/automl v1.13.9/go.mod h1:KECCWW2AFsRuEVxUJEIXxcm3yPLf1rxS+qsBamyacMc=
+cloud.google.com/go/automl v1.13.10/go.mod h1:I5nlZ4sBYIX90aBwv3mm5A0W6tlGbzrJ4nkaErdsmAk=
+cloud.google.com/go/automl v1.13.11/go.mod h1:oMJdXRDOVC+Eq3PnGhhxSut5Hm9TSyVx1aLEOgerOw8=
+cloud.google.com/go/automl v1.13.12/go.mod h1:Rw8hmEIlKyvdhbFXjLrLvM2qNKZNwf5oraS5DervadE=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA=
+cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw=
+cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88=
+cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM=
+cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g=
+cloud.google.com/go/baremetalsolution v1.2.4/go.mod h1:BHCmxgpevw9IEryE99HbYEfxXkAEA3hkMJbYYsHtIuY=
+cloud.google.com/go/baremetalsolution v1.2.5/go.mod h1:CImy7oNMC/7vLV1Ig68Og6cgLWuVaghDrm+sAhYSSxA=
+cloud.google.com/go/baremetalsolution v1.2.6/go.mod h1:KkS2BtYXC7YGbr42067nzFr+ABFMs6cxEcA1F+cedIw=
+cloud.google.com/go/baremetalsolution v1.2.8/go.mod h1:Ai8ENs7ADMYWQ45DtfygUc6WblhShfi3kNPvuGv8/ok=
+cloud.google.com/go/baremetalsolution v1.2.9/go.mod h1:eFlsoR4Im039D+EVn1fKXEKWNPoMW2ewXBTHmjEZxlM=
+cloud.google.com/go/baremetalsolution v1.2.10/go.mod h1:eO2c2NMRy5ytcNPhG78KPsWGNsX5W/tUsCOWmYihx6I=
+cloud.google.com/go/baremetalsolution v1.2.11/go.mod h1:bqthxNtU+n3gwWxoyXVR9VdSqIfVcgmpYtBlXQkeWq8=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A=
+cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk=
+cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk=
+cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8=
+cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98=
+cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU=
+cloud.google.com/go/batch v1.7.0/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU=
+cloud.google.com/go/batch v1.8.0/go.mod h1:k8V7f6VE2Suc0zUM4WtoibNrA6D3dqBpB+++e3vSGYc=
+cloud.google.com/go/batch v1.8.2/go.mod h1:QA8HAbPZ1tTcTFZmRgk7v13i0tNQ9x6s1wsZl6jZQh0=
+cloud.google.com/go/batch v1.8.3/go.mod h1:mnDskkuz1h+6i/ra8IMhTf8HwG8GOswSRKPJdAOgSbE=
+cloud.google.com/go/batch v1.8.6/go.mod h1:rQovrciYbtuY40Uprg/IWLlhmUR1GZYzX9xnymUdfBU=
+cloud.google.com/go/batch v1.8.7/go.mod h1:O5/u2z8Wc7E90Bh4yQVLQIr800/0PM5Qzvjac3Jxt4k=
+cloud.google.com/go/batch v1.9.0/go.mod h1:VhRaG/bX2EmeaPSHvtptP5OAhgYuTrvtTAulKM68oiI=
+cloud.google.com/go/batch v1.9.1/go.mod h1:UGOBIGCUNo9NPeJ4VvmGpnTbE8vTewNhFaI/ZcQZaHk=
+cloud.google.com/go/batch v1.9.2/go.mod h1:smqwS4sleDJVAEzBt/TzFfXLktmWjFNugGDWl8coKX4=
+cloud.google.com/go/batch v1.9.4/go.mod h1:qqfXThFPI9dyDK1PfidiEOM/MrS+jUQualcQJytJCLA=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
+cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
+cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk=
+cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc=
+cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo=
+cloud.google.com/go/beyondcorp v1.0.4/go.mod h1:Gx8/Rk2MxrvWfn4WIhHIG1NV7IBfg14pTKv1+EArVcc=
+cloud.google.com/go/beyondcorp v1.0.5/go.mod h1:lFRWb7i/w4QBFW3MbM/P9wX15eLjwri/HYvQnZuk4Fw=
+cloud.google.com/go/beyondcorp v1.0.6/go.mod h1:wRkenqrVRtnGFfnyvIg0zBFUdN2jIfeojFF9JJDwVIA=
+cloud.google.com/go/beyondcorp v1.0.8/go.mod h1:2WaEvUnw+1ZIUNu227h71X/Q8ypcWWowii9TQ4xlfo0=
+cloud.google.com/go/beyondcorp v1.0.9/go.mod h1:xa0eU8tIbYVraMOpRh5V9PirdYROvTUcPayJW9UlSNs=
+cloud.google.com/go/beyondcorp v1.0.10/go.mod h1:G09WxvxJASbxbrzaJUMVvNsB1ZiaKxpbtkjiFtpDtbo=
+cloud.google.com/go/beyondcorp v1.0.11/go.mod h1:V0EIXuYoyqKkHfnNCYZrNv6M+WYWJGIr5h019LurF3I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec=
+cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA=
+cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug=
+cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik=
+cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc=
+cloud.google.com/go/bigquery v1.60.0/go.mod h1:Clwk2OeC0ZU5G5LDg7mo+h8U7KlAa5v06z5rptKdM3g=
+cloud.google.com/go/bigquery v1.61.0/go.mod h1:PjZUje0IocbuTOdq4DBOJLNYB0WF3pAKBHzAYyxCwFo=
+cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA=
+cloud.google.com/go/bigtable v1.18.1/go.mod h1:NAVyfJot9jlo+KmgWLUJ5DJGwNDoChzAcrecLpmuAmY=
+cloud.google.com/go/bigtable v1.20.0/go.mod h1:upJDn8frsjzpRMfybiWkD1PG6WCCL7CRl26MgVeoXY4=
+cloud.google.com/go/bigtable v1.27.1/go.mod h1:AMREzzQzYjiWYan7JvJXINc8dfqemnNBWDHlYONtPLw=
+cloud.google.com/go/bigtable v1.27.2-0.20240725222120-ce31365acc54/go.mod h1:NmJ2jfoB34NxQyk4w7UCchopqE9r+a186ewvGrM79TI=
+cloud.google.com/go/bigtable v1.27.2-0.20240730134218-123c88616251/go.mod h1:avmXcmxVbLJAo9moICRYMgDyTTPoV0MA0lHKnyqV4fQ=
+cloud.google.com/go/bigtable v1.27.2-0.20240802230159-f371928b558f/go.mod h1:avmXcmxVbLJAo9moICRYMgDyTTPoV0MA0lHKnyqV4fQ=
+cloud.google.com/go/bigtable v1.29.0/go.mod h1:5p909nNdWaNUcWs6KGZO8mI5HUovstlmrIi7+eA5PTQ=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA=
+cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64=
+cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64=
+cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg=
+cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU=
+cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk=
+cloud.google.com/go/billing v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk=
+cloud.google.com/go/billing v1.18.2/go.mod h1:PPIwVsOOQ7xzbADCwNe8nvK776QpfrOAUkvKjCUcpSE=
+cloud.google.com/go/billing v1.18.3/go.mod h1:RuLq6KCY/YQfB2X/hCv3xpsrrBCdxnMS0pJcL7qqx5w=
+cloud.google.com/go/billing v1.18.4/go.mod h1:hECVHwfls2hhA/wrNVAvZ48GQzMxjWkQRq65peAnxyc=
+cloud.google.com/go/billing v1.18.5/go.mod h1:lHw7fxS6p7hLWEPzdIolMtOd0ahLwlokW06BzbleKP8=
+cloud.google.com/go/billing v1.18.7/go.mod h1:RreCBJPmaN/lzCz/2Xl1hA+OzWGqrzDsax4Qjjp0CbA=
+cloud.google.com/go/billing v1.18.8/go.mod h1:oFsuKhKiuxK7dDQ4a8tt5/1cScEo4IzhssWj6TTdi6k=
+cloud.google.com/go/billing v1.18.9/go.mod h1:bKTnh8MBfCMUT1fzZ936CPN9rZG7ZEiHB2J3SjIjByc=
+cloud.google.com/go/billing v1.18.10/go.mod h1:Lt+Qrjqsde38l/h1+9fzu44Pv9t+Suyf/p973mrg+xU=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U=
+cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154=
+cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE=
+cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0=
+cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU=
+cloud.google.com/go/binaryauthorization v1.8.0/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU=
+cloud.google.com/go/binaryauthorization v1.8.1/go.mod h1:1HVRyBerREA/nhI7yLang4Zn7vfNVA3okoAR9qYQJAQ=
+cloud.google.com/go/binaryauthorization v1.8.2/go.mod h1:/v3/F2kBR5QmZBnlqqzq9QNwse8OFk+8l1gGNUzjedw=
+cloud.google.com/go/binaryauthorization v1.8.3/go.mod h1:Cul4SsGlbzEsWPOz2sH8m+g2Xergb6ikspUyQ7iOThE=
+cloud.google.com/go/binaryauthorization v1.8.5/go.mod h1:2npTMgNJPsmUg0jfmDDORuqBkTPEW6ZSTHXzfxTvN1M=
+cloud.google.com/go/binaryauthorization v1.8.6/go.mod h1:GAfktMiQW14Y67lIK5q9QSbzYc4NE/xIpQemVRhIVXc=
+cloud.google.com/go/binaryauthorization v1.8.7/go.mod h1:cRj4teQhOme5SbWQa96vTDATQdMftdT5324BznxANtg=
+cloud.google.com/go/binaryauthorization v1.8.8/go.mod h1:D7B3gkNPdZ1Zj2IEyfypDTgbwFgTWE2SE6Csz0f46jg=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI=
+cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ=
+cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00=
+cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE=
+cloud.google.com/go/certificatemanager v1.7.5/go.mod h1:uX+v7kWqy0Y3NG/ZhNvffh0kuqkKZIXdvlZRO7z0VtM=
+cloud.google.com/go/certificatemanager v1.8.0/go.mod h1:5qq/D7PPlrMI+q9AJeLrSoFLX3eTkLc9MrcECKrWdIM=
+cloud.google.com/go/certificatemanager v1.8.1/go.mod h1:hDQzr50Vx2gDB+dOfmDSsQzJy/UPrYRdzBdJ5gAVFIc=
+cloud.google.com/go/certificatemanager v1.8.3/go.mod h1:QS0jxTu5wgEbzaYgGs/GBYKvVgAgc9jnYaaTFH8jRtE=
+cloud.google.com/go/certificatemanager v1.8.4/go.mod h1:knD4QGjaogN6hy/pk1f2Cz1fhU8oYeYSF710RRf+d6k=
+cloud.google.com/go/certificatemanager v1.8.5/go.mod h1:r2xINtJ/4xSz85VsqvjY53qdlrdCjyniib9Jp98ZKKM=
+cloud.google.com/go/certificatemanager v1.8.6/go.mod h1:ZsK7vU+XFDfSRwOqB4GjAGzawIIA3dWPXaFC9I5Jsts=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc=
+cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0=
+cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ=
+cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk=
+cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE=
+cloud.google.com/go/channel v1.17.4/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE=
+cloud.google.com/go/channel v1.17.5/go.mod h1:FlpaOSINDAXgEext0KMaBq/vwpLMkkPAw9b2mApQeHc=
+cloud.google.com/go/channel v1.17.6/go.mod h1:fr0Oidb2mPfA0RNcV+JMSBv5rjpLHjy9zVM5PFq6Fm4=
+cloud.google.com/go/channel v1.17.7/go.mod h1:b+FkgBrhMKM3GOqKUvqHFY/vwgp+rwsAuaMd54wCdN4=
+cloud.google.com/go/channel v1.17.9/go.mod h1:h9emIJm+06sK1FxqC3etsWdG87tg92T24wimlJs6lhY=
+cloud.google.com/go/channel v1.17.10/go.mod h1:TzcYuXlpeex8O483ofkxbY/DKRF49NBumZTJPvjstVA=
+cloud.google.com/go/channel v1.17.11/go.mod h1:gjWCDBcTGQce/BSMoe2lAqhlq0dIRiZuktvBKXUawp0=
+cloud.google.com/go/channel v1.17.12/go.mod h1:DoVQacEH1YuNqIZVN8v67cXGxaUyOgjrst+/+pkVqWU=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU=
+cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg=
+cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM=
+cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM=
+cloud.google.com/go/cloudbuild v1.15.1/go.mod h1:gIofXZSu+XD2Uy+qkOrGKEx45zd7s28u/k8f99qKals=
+cloud.google.com/go/cloudbuild v1.16.0/go.mod h1:CCWnqxLxEdh8kpOK83s3HTNBTpoIFn/U9j8DehlUyyA=
+cloud.google.com/go/cloudbuild v1.16.1/go.mod h1:c2KUANTtCBD8AsRavpPout6Vx8W+fsn5zTsWxCpWgq4=
+cloud.google.com/go/cloudbuild v1.16.3/go.mod h1:KJYZAwTUaDKDdEHwLj/EmnpmwLkMuq+fGnBEHA1LlE4=
+cloud.google.com/go/cloudbuild v1.16.4/go.mod h1:YSNmtWgg9lmL4st4+lej1XywNEUQnbyA/F+DdXPBevA=
+cloud.google.com/go/cloudbuild v1.16.5/go.mod h1:HXLpZ8QeYZgmDIWpbl9Gs22p6o6uScgQ/cV9HF9cIZU=
+cloud.google.com/go/cloudbuild v1.16.6/go.mod h1:Y7+6WFO8pT53rG0Lve6OZoO4+RkVTHGnHG7EB3uNiQw=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI=
+cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM=
+cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk=
+cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU=
+cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc=
+cloud.google.com/go/clouddms v1.7.4/go.mod h1:RdrVqoFG9RWI5AvZ81SxJ/xvxPdtcRhFotwdE79DieY=
+cloud.google.com/go/clouddms v1.7.5/go.mod h1:O4GVvxKPxbXlVfxkoUIXi8UAwwIHoszYm32dJ8tgbvE=
+cloud.google.com/go/clouddms v1.7.6/go.mod h1:8HWZ2tznZ0mNAtTpfnRNT0QOThqn9MBUqTj0Lx8npIs=
+cloud.google.com/go/clouddms v1.7.8/go.mod h1:KQpBMxH99ZTPK4LgXkYUntzRQ5hcNkjpGRbNSRzW9Nk=
+cloud.google.com/go/clouddms v1.7.9/go.mod h1:U2j8sOFtsIovea96mz2joyNMULl43TGadf7tOAUKKzs=
+cloud.google.com/go/clouddms v1.7.10/go.mod h1:PzHELq0QDyA7VaD9z6mzh2mxeBz4kM6oDe8YxMxd4RA=
+cloud.google.com/go/clouddms v1.7.11/go.mod h1:rPNK0gJEkF2//rdxhCKhx+IFBlzkObOZhlhvDY1JKCE=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/cloudtasks v1.12.2/go.mod h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk=
+cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys=
+cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0=
+cloud.google.com/go/cloudtasks v1.12.6/go.mod h1:b7c7fe4+TJsFZfDyzO51F7cjq7HLUlRi/KZQLQjDsaY=
+cloud.google.com/go/cloudtasks v1.12.7/go.mod h1:I6o/ggPK/RvvokBuUppsbmm4hrGouzFbf6fShIm0Pqc=
+cloud.google.com/go/cloudtasks v1.12.8/go.mod h1:aX8qWCtmVf4H4SDYUbeZth9C0n9dBj4dwiTYi4Or/P4=
+cloud.google.com/go/cloudtasks v1.12.10/go.mod h1:OHJzRAdE+7H00cdsINhb21ugVLDgk3Uh4r0holCB5XQ=
+cloud.google.com/go/cloudtasks v1.12.11/go.mod h1:uDR/oUmPZqL2rNz9M9MXvm07hkkLnvvUORbud8MA5p4=
+cloud.google.com/go/cloudtasks v1.12.12/go.mod h1:8UmM+duMrQpzzRREo0i3x3TrFjsgI/3FQw3664/JblA=
+cloud.google.com/go/cloudtasks v1.12.13/go.mod h1:53OmmKqQTocrbeCL13cuaryBQOflyO8s4NxuRHJlXgc=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
+cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns=
+cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
+cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
+cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
+cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
+cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
+cloud.google.com/go/compute v1.27.0/go.mod h1:LG5HwRmWFKM2C5XxHRiNzkLLXW48WwvyVC0mfWsYPOM=
+cloud.google.com/go/compute v1.27.2/go.mod h1:YQuHkNEwP3bIz4LBYQqf4DIMfFtTDtnEgnwG0mJQQ9I=
+cloud.google.com/go/compute v1.27.3/go.mod h1:5GuDo3l1k9CFhfIHK1sXqlqOW/iWX4/eBlO5FtxDhvQ=
+cloud.google.com/go/compute v1.27.4/go.mod h1:7JZS+h21ERAGHOy5qb7+EPyXlQwzshzrx1x6L9JhTqU=
+cloud.google.com/go/compute v1.27.5/go.mod h1:DfwDGujFTdSeiE8b8ZqadF/uxHFBz+ekGsk8Zfi9dTA=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU=
+cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE=
+cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso=
+cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis=
+cloud.google.com/go/contactcenterinsights v1.12.0/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis=
+cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis=
+cloud.google.com/go/contactcenterinsights v1.13.0/go.mod h1:ieq5d5EtHsu8vhe2y3amtZ+BE+AQwX5qAy7cpo0POsI=
+cloud.google.com/go/contactcenterinsights v1.13.1/go.mod h1:/3Ji8Rr1GS6d+/MOwlXM2gZPSuvTKIFyf8OG+7Pe5r8=
+cloud.google.com/go/contactcenterinsights v1.13.2/go.mod h1:AfkSB8t7mt2sIY6WpfO61nD9J9fcidIchtxm9FqJVXk=
+cloud.google.com/go/contactcenterinsights v1.13.4/go.mod h1:6OWSyQxeaQRxhkyMhtE+RFOOlsMcKOTukv8nnjxbNCQ=
+cloud.google.com/go/contactcenterinsights v1.13.5/go.mod h1:/27aGOSszuoT547CX4kTbF+4nMv3EIXN8+z+dJcMZco=
+cloud.google.com/go/contactcenterinsights v1.13.6/go.mod h1:mL+DbN3pMQGaAbDC4wZhryLciwSwHf5Tfk4Itr72Zyk=
+cloud.google.com/go/contactcenterinsights v1.13.7/go.mod h1:N5D7yxGknC0pDUC1OKOLShGQwpidKizKu3smt08153U=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4=
+cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04=
+cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4=
+cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4=
+cloud.google.com/go/container v1.28.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4=
+cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4=
+cloud.google.com/go/container v1.30.1/go.mod h1:vkbfX0EnAKL/vgVECs5BZn24e1cJROzgszJirRKQ4Bg=
+cloud.google.com/go/container v1.31.0/go.mod h1:7yABn5s3Iv3lmw7oMmyGbeV6tQj86njcTijkkGuvdZA=
+cloud.google.com/go/container v1.33.0/go.mod h1:u5QBBv/V9dVNK/NtTppCf6T4P8gzp+dQSwx2DqPnAKc=
+cloud.google.com/go/container v1.35.0/go.mod h1:02fCocALhTHLw4zwqrRaFrztjoQd53yZWFq0nvr+hQo=
+cloud.google.com/go/container v1.35.1/go.mod h1:udm8fgLm3TtpnjFN4QLLjZezAIIp/VnMo316yIRVRQU=
+cloud.google.com/go/container v1.37.0/go.mod h1:AFsgViXsfLvZHsgHrWQqPqfAPjCwXrZmLjKJ64uhLIw=
+cloud.google.com/go/container v1.37.2/go.mod h1:2ly7zpBmWtYjjuoB3fHyq8Gqrxaj2NIwzwVRpUcKYXk=
+cloud.google.com/go/container v1.37.3/go.mod h1:XKwtVfsTBsnZ9Ve1Pw2wkjk5kSjJqsHl3oBrbbi4w/M=
+cloud.google.com/go/container v1.38.0/go.mod h1:U0uPBvkVWOJGY/0qTVuPS7NeafFEUsHSPqT5pB8+fCY=
+cloud.google.com/go/container v1.38.1/go.mod h1:2r4Qiz6IG2LhRFfWhPNmrYD7yzdE2B2kghigVWoSw/g=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0=
+cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U=
+cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY=
+cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8=
+cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U=
+cloud.google.com/go/containeranalysis v0.11.4/go.mod h1:cVZT7rXYBS9NG1rhQbWL9pWbXCKHWJPYraE8/FTSYPE=
+cloud.google.com/go/containeranalysis v0.11.5/go.mod h1:DlgF5MaxAmGdq6F9wCUEp/JNx9lsr6QaQONFd4mxG8A=
+cloud.google.com/go/containeranalysis v0.11.6/go.mod h1:YRf7nxcTcN63/Kz9f86efzvrV33g/UV8JDdudRbYEUI=
+cloud.google.com/go/containeranalysis v0.11.8/go.mod h1:2ru4oxs6dCcaG3ZsmKAy4yMmG68ukOuS/IRCMEHYpLo=
+cloud.google.com/go/containeranalysis v0.12.0/go.mod h1:a3Yo1yk1Dv4nVmlxcJWOJDqsnzy5I1HmETg2UGlERhs=
+cloud.google.com/go/containeranalysis v0.12.1/go.mod h1:+/lcJIQSFt45TC0N9Nq7/dPbl0isk6hnC4EvBBqyXsM=
+cloud.google.com/go/containeranalysis v0.12.2/go.mod h1:XF/U1ZJ9kXfl8HWRzuWMtEtzBb8SvJ0zvySrxrQA3N0=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E=
+cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE=
+cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE=
+cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A=
+cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk=
+cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM=
+cloud.google.com/go/datacatalog v1.19.0/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM=
+cloud.google.com/go/datacatalog v1.19.2/go.mod h1:2YbODwmhpLM4lOFe3PuEhHK9EyTzQJ5AXgIy7EDKTEE=
+cloud.google.com/go/datacatalog v1.19.3/go.mod h1:ra8V3UAsciBpJKQ+z9Whkxzxv7jmQg1hfODr3N3YPJ4=
+cloud.google.com/go/datacatalog v1.20.0/go.mod h1:fSHaKjIroFpmRrYlwz9XBB2gJBpXufpnxyAKaT4w6L0=
+cloud.google.com/go/datacatalog v1.20.1/go.mod h1:Jzc2CoHudhuZhpv78UBAjMEg3w7I9jHA11SbRshWUjk=
+cloud.google.com/go/datacatalog v1.20.3/go.mod h1:AKC6vAy5urnMg5eJK3oUjy8oa5zMbiY33h125l8lmlo=
+cloud.google.com/go/datacatalog v1.20.4/go.mod h1:71PDwywIYkNgSXdUU3H0mkTp3j15aahfYJ1CY3DogtU=
+cloud.google.com/go/datacatalog v1.20.5/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI=
+cloud.google.com/go/datacatalog v1.21.0/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI=
+cloud.google.com/go/datacatalog v1.21.1/go.mod h1:23qsWWm592aQHwZ4or7VDjNhx7DeNklHAPE3GM47d1U=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw=
+cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo=
+cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE=
+cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w=
+cloud.google.com/go/dataflow v0.9.5/go.mod h1:udl6oi8pfUHnL0z6UN9Lf9chGqzDMVqcYTcZ1aPnCZQ=
+cloud.google.com/go/dataflow v0.9.6/go.mod h1:nO0hYepRlPlulvAHCJ+YvRPLnL/bwUswIbhgemAt6eM=
+cloud.google.com/go/dataflow v0.9.7/go.mod h1:3BjkOxANrm1G3+/EBnEsTEEgJu1f79mFqoOOZfz3v+E=
+cloud.google.com/go/dataflow v0.9.9/go.mod h1:Wk/92E1BvhV7qs/dWb+3dN26uGgyp/H1Jr5ZJxeD3dw=
+cloud.google.com/go/dataflow v0.9.10/go.mod h1:lkhCwyVAOR4cKx+TzaxFbfh0tJcBVqxyIN97TDc/OJ8=
+cloud.google.com/go/dataflow v0.9.11/go.mod h1:CCLufd7I4pPfyp54qMgil/volrL2ZKYjXeYLfQmBGJs=
+cloud.google.com/go/dataflow v0.9.12/go.mod h1:+2+80N2FOdDFWYhZdC2uTlX7GHP5kOH4vPNtfadggqQ=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M=
+cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM=
+cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg=
+cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs=
+cloud.google.com/go/dataform v0.9.2/go.mod h1:S8cQUwPNWXo7m/g3DhWHsLBoufRNn9EgFrMgne2j7cI=
+cloud.google.com/go/dataform v0.9.3/go.mod h1:c/TBr0tqx5UgBTmg3+5DZvLxX+Uy5hzckYZIngkuU/w=
+cloud.google.com/go/dataform v0.9.4/go.mod h1:jjo4XY+56UrNE0wsEQsfAw4caUs4DLJVSyFBDelRDtQ=
+cloud.google.com/go/dataform v0.9.6/go.mod h1:JKDPMfcYMu9oUMubIvvAGWTBX0sw4o/JIjCcczzbHmk=
+cloud.google.com/go/dataform v0.9.7/go.mod h1:zJp0zOSCKHgt2IxTQ90vNeDfT7mdqFA8ZzrYIsxTEM0=
+cloud.google.com/go/dataform v0.9.8/go.mod h1:cGJdyVdunN7tkeXHPNosuMzmryx55mp6cInYBgxN3oA=
+cloud.google.com/go/dataform v0.9.9/go.mod h1:QkiXNcrbFGjYtPtTkn700sfBiGIOG4mmpt26Ds8Ixeg=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI=
+cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0=
+cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE=
+cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM=
+cloud.google.com/go/datafusion v1.7.5/go.mod h1:bYH53Oa5UiqahfbNK9YuYKteeD4RbQSNMx7JF7peGHc=
+cloud.google.com/go/datafusion v1.7.6/go.mod h1:cDJfsWRYcaktcM1xfwkBOIccOaWJ5mG3zm95EaLtINA=
+cloud.google.com/go/datafusion v1.7.7/go.mod h1:qGTtQcUs8l51lFA9ywuxmZJhS4ozxsBSus6ItqCUWMU=
+cloud.google.com/go/datafusion v1.7.9/go.mod h1:ciYV8FL0JmrwgoJ7CH64oUHiI0oOf2VLE45LWKT51Ls=
+cloud.google.com/go/datafusion v1.7.10/go.mod h1:MYRJjIUs2kVTbYySSp4+foNyq2MfgKTLMcsquEjbapM=
+cloud.google.com/go/datafusion v1.7.11/go.mod h1:aU9zoBHgYmoPp4dzccgm/Gi4xWDMXodSZlNZ4WNeptw=
+cloud.google.com/go/datafusion v1.7.12/go.mod h1:ZUaEMjNVppM5ZasVt87QE0jN57O0LKY3uFe67EQ0GGI=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY=
+cloud.google.com/go/datalabeling v0.8.2/go.mod h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE=
+cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI=
+cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8=
+cloud.google.com/go/datalabeling v0.8.5/go.mod h1:IABB2lxQnkdUbMnQaOl2prCOfms20mcPxDBm36lps+s=
+cloud.google.com/go/datalabeling v0.8.6/go.mod h1:8gVcLufcZg0hzRnyMkf3UvcUen2Edo6abP6Rsz2jS6Q=
+cloud.google.com/go/datalabeling v0.8.7/go.mod h1:/PPncW5gxrU15UzJEGQoOT3IobeudHGvoExrtZ8ZBwo=
+cloud.google.com/go/datalabeling v0.8.9/go.mod h1:61QutR66VZFgN8boHhl4/FTfxenNzihykv18BgxwSrg=
+cloud.google.com/go/datalabeling v0.8.10/go.mod h1:8+IBTdU0te7w9b7BoZzUl05XgPvgqOrxQMzoP47skGM=
+cloud.google.com/go/datalabeling v0.8.11/go.mod h1:6IGUV3z7hlkAU5ndKVshv/8z+7pxE+k0qXsEjyzO1Xg=
+cloud.google.com/go/datalabeling v0.8.12/go.mod h1:IBbWnl80akCFj7jZ89/dRB/juuXig+QrQoLg24+vidg=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs=
+cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y=
+cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
+cloud.google.com/go/dataplex v1.11.2/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
+cloud.google.com/go/dataplex v1.13.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
+cloud.google.com/go/dataplex v1.14.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
+cloud.google.com/go/dataplex v1.14.1/go.mod h1:bWxQAbg6Smg+sca2+Ex7s8D9a5qU6xfXtwmq4BVReps=
+cloud.google.com/go/dataplex v1.14.2/go.mod h1:0oGOSFlEKef1cQeAHXy4GZPB/Ife0fz/PxBf+ZymA2U=
+cloud.google.com/go/dataplex v1.15.0/go.mod h1:R5rUQ3X18d6wcMraLOUIOTEULasL/1nvSrNF7C98eyg=
+cloud.google.com/go/dataplex v1.16.0/go.mod h1:OlBoytuQ56+7aUCC03D34CtoF/4TJ5SiIrLsBdDu87Q=
+cloud.google.com/go/dataplex v1.16.1/go.mod h1:szV2OpxfbmRBcw1cYq2ln8QsLR3FJq+EwTTIo+0FnyE=
+cloud.google.com/go/dataplex v1.18.0/go.mod h1:THLDVG07lcY1NgqVvjTV1mvec+rFHwpDwvSd+196MMc=
+cloud.google.com/go/dataplex v1.18.1/go.mod h1:G5+muC3D5rLSHG9uKACs5WfRtthIVwyUJSIXi2Wzp30=
+cloud.google.com/go/dataplex v1.18.2/go.mod h1:NuBpJJMGGQn2xctX+foHEDKRbizwuiHJamKvvSteY3Q=
+cloud.google.com/go/dataplex v1.18.3/go.mod h1:wcfVhUr529uu9aZSy9WIUUdOCrkB8M5Gikfh3YUuGtE=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4=
+cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY=
+cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o=
+cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4=
+cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY=
+cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY=
+cloud.google.com/go/dataproc/v2 v2.4.0/go.mod h1:3B1Ht2aRB8VZIteGxQS/iNSJGzt9+CA0WGnDVMEm7Z4=
+cloud.google.com/go/dataproc/v2 v2.4.1/go.mod h1:HrymsaRUG1FjK2G1sBRQrHMhgj5+ENUIAwRbL130D8o=
+cloud.google.com/go/dataproc/v2 v2.4.2/go.mod h1:smGSj1LZP3wtnsM9eyRuDYftNAroAl6gvKp/Wk64XDE=
+cloud.google.com/go/dataproc/v2 v2.5.1/go.mod h1:5s2CuQyTPX7e19ZRMLicfPFNgXrvsVct3xz94UvWFeQ=
+cloud.google.com/go/dataproc/v2 v2.5.2/go.mod h1:KCr6aYKulU4Am8utvRoXKe1L2hPkfX9Ox0m/rvenUjU=
+cloud.google.com/go/dataproc/v2 v2.5.3/go.mod h1:RgA5QR7v++3xfP7DlgY3DUmoDSTaaemPe0ayKrQfyeg=
+cloud.google.com/go/dataproc/v2 v2.5.4/go.mod h1:rpxihxKtWjPl8MDwjGiYgMva8nEWQSyzvl3e0p4ATt4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8=
+cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs=
+cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4=
+cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c=
+cloud.google.com/go/dataqna v0.8.5/go.mod h1:vgihg1mz6n7pb5q2YJF7KlXve6tCglInd6XO0JGOlWM=
+cloud.google.com/go/dataqna v0.8.6/go.mod h1:3u2zPv3VwMUNW06oTRcSWS3+dDuxF/0w5hEWUCsLepw=
+cloud.google.com/go/dataqna v0.8.7/go.mod h1:hvxGaSvINAVH5EJJsONIwT1y+B7OQogjHPjizOFoWOo=
+cloud.google.com/go/dataqna v0.8.9/go.mod h1:wrw1SL/zLRlVgf0d8P0ZBJ2hhGaLbwoNRsW6m1mn64g=
+cloud.google.com/go/dataqna v0.8.10/go.mod h1:e6Ula5UmCrbT7jOI6zZDwHHtAsDdKHKDrHSkj0pDlAQ=
+cloud.google.com/go/dataqna v0.8.11/go.mod h1:74Icl1oFKKZXPd+W7YDtqJLa+VwLV6wZ+UF+sHo2QZQ=
+cloud.google.com/go/dataqna v0.8.12/go.mod h1:86JdVMqh3521atZY1P7waaa50vzIbErTLY7gsio+umg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8=
+cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8=
+cloud.google.com/go/datastore v1.17.0/go.mod h1:RiRZU0G6VVlIVlv1HRo3vSAPFHULV0ddBNsXO+Sony4=
+cloud.google.com/go/datastore v1.17.1/go.mod h1:mtzZ2HcVtz90OVrEXXGDc2pO4NM1kiBQy8YV4qGe0ZM=
+cloud.google.com/go/datastore v1.18.1-0.20240822134219-d8887df4a12f/go.mod h1:XvmGl5dNXQvk9Xm0fwdA4YYicMtB9Gmxgc1g9gxMu18=
+cloud.google.com/go/datastore v1.19.0/go.mod h1:KGzkszuj87VT8tJe67GuB+qLolfsOt6bZq/KFuWaahc=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c=
+cloud.google.com/go/datastream v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0=
+cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA=
+cloud.google.com/go/datastream v1.10.4/go.mod h1:7kRxPdxZxhPg3MFeCSulmAJnil8NJGGvSNdn4p1sRZo=
+cloud.google.com/go/datastream v1.10.5/go.mod h1:BmIPX19K+Pjho3+sR7Jtddmf+vluzLgaG7465xje/wg=
+cloud.google.com/go/datastream v1.10.6/go.mod h1:lPeXWNbQ1rfRPjBFBLUdi+5r7XrniabdIiEaCaAU55o=
+cloud.google.com/go/datastream v1.10.8/go.mod h1:6nkPjnk5Qr602Wq+YQ+/RWUOX5h4voMTz5abgEOYPCM=
+cloud.google.com/go/datastream v1.10.9/go.mod h1:LvUG7tBqMn9zDkgj5HlefDzaOth8ohVITF8qTtqAINw=
+cloud.google.com/go/datastream v1.10.10/go.mod h1:NqchuNjhPlISvWbk426/AU/S+Kgv7srlID9P5XOAbtg=
+cloud.google.com/go/datastream v1.10.11/go.mod h1:0d9em/ERaof15lY5JU3pWKF7ZJOHiPKcNJsTCBz6TX8=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g=
+cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw=
+cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g=
+cloud.google.com/go/deploy v1.15.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g=
+cloud.google.com/go/deploy v1.16.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g=
+cloud.google.com/go/deploy v1.17.0/go.mod h1:XBr42U5jIr64t92gcpOXxNrqL2PStQCXHuKK5GRUuYo=
+cloud.google.com/go/deploy v1.17.1/go.mod h1:SXQyfsXrk0fBmgBHRzBjQbZhMfKZ3hMQBw5ym7MN/50=
+cloud.google.com/go/deploy v1.17.2/go.mod h1:kKSAl1mab0Y27XlWGBrKNA5WOOrKo24KYzx2JRAfBL4=
+cloud.google.com/go/deploy v1.19.0/go.mod h1:BW9vAujmxi4b/+S7ViEuYR65GiEsqL6Mhf5S/9TeDRU=
+cloud.google.com/go/deploy v1.19.2/go.mod h1:i6zfU9FZkqFgWIvO2/gsodGU9qF4tF9mBgoMdfnf6as=
+cloud.google.com/go/deploy v1.19.3/go.mod h1:Ut73ILRKoxtcIWeRJyYwuhBAckuSE1KJXlSX38hf4B0=
+cloud.google.com/go/deploy v1.20.0/go.mod h1:PaOfS47VrvmYnxG5vhHg0KU60cKeWcqyLbMBjxS8DW8=
+cloud.google.com/go/deploy v1.21.0/go.mod h1:PaOfS47VrvmYnxG5vhHg0KU60cKeWcqyLbMBjxS8DW8=
+cloud.google.com/go/deploy v1.21.2/go.mod h1:BDBWUXXCBGrvYxVmSYXIRdNffioym0ChQWDQS0c/wA8=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M=
+cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M=
+cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk=
+cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c=
+cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ=
+cloud.google.com/go/dialogflow v1.47.0/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ=
+cloud.google.com/go/dialogflow v1.48.0/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ=
+cloud.google.com/go/dialogflow v1.48.1/go.mod h1:C1sjs2/g9cEwjCltkKeYp3FFpz8BOzNondEaAlCpt+A=
+cloud.google.com/go/dialogflow v1.48.2/go.mod h1:7A2oDf6JJ1/+hdpnFRfb/RjJUOh2X3rhIa5P8wQSEX4=
+cloud.google.com/go/dialogflow v1.49.0/go.mod h1:dhVrXKETtdPlpPhE7+2/k4Z8FRNUp6kMV3EW3oz/fe0=
+cloud.google.com/go/dialogflow v1.52.0/go.mod h1:mMh76X5D0Tg48PjGXaCveHpeKDnKz+dpwGln3WEN7DQ=
+cloud.google.com/go/dialogflow v1.53.0/go.mod h1:LqAvxq7bXiiGC3/DWIz9XXCxth2z2qpSnBAAmlNOj6U=
+cloud.google.com/go/dialogflow v1.54.0/go.mod h1:/YQLqB0bdDJl+zFKN+UNQsYUqLfWZb1HsJUQqMT7Q6k=
+cloud.google.com/go/dialogflow v1.54.2/go.mod h1:avkFNYog+U127jKpGzW1FOllBwZy3OfCz1K1eE9RGh8=
+cloud.google.com/go/dialogflow v1.54.3/go.mod h1:Sm5uznNq8Vrj7R+Uc84qz41gW2AXRZeWgvJ9owKZw9g=
+cloud.google.com/go/dialogflow v1.55.0/go.mod h1:0u0hSlJiFpMkMpMNoFrQETwDjaRm8Q8hYKv+jz5JeRA=
+cloud.google.com/go/dialogflow v1.56.0/go.mod h1:P1hIske3kr9pSl11nEP4tFfAu2E4US+7PpboeBhM4ag=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI=
+cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ=
+cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0=
+cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI=
+cloud.google.com/go/dlp v1.11.2/go.mod h1:9Czi+8Y/FegpWzgSfkRlyz+jwW6Te9Rv26P3UfU/h/w=
+cloud.google.com/go/dlp v1.12.0/go.mod h1:IUCT4ZMALgUqQyy2VMhvfYD/yFQFfbsgk1d7RoSLeSk=
+cloud.google.com/go/dlp v1.12.1/go.mod h1:RBUw3yjNSVcFoU8L4ECuxAx0lo1MrusfA4y46bp9vLw=
+cloud.google.com/go/dlp v1.13.0/go.mod h1:5T/dFtKOn2Q3QLnaKjjir7nEGA8K00WaqoKodLkbF/c=
+cloud.google.com/go/dlp v1.14.0/go.mod h1:4fvEu3EbLsHrgH3QFdFlTNIiCP5mHwdYhS/8KChDIC4=
+cloud.google.com/go/dlp v1.14.2/go.mod h1:+uwRt+6wZ3PL0wsmZ1cUAj0Mt9kyeV3WcIKPW03wJVU=
+cloud.google.com/go/dlp v1.14.3/go.mod h1:iyhOlJCSAGNP2z5YPoBjV+M9uhyiUuxjZDYqbvO3WMM=
+cloud.google.com/go/dlp v1.15.0/go.mod h1:LtPZxZAenBXKzvWIOB2hdHIXuEcK0wW0En8//u+/nNA=
+cloud.google.com/go/dlp v1.16.0/go.mod h1:LtPZxZAenBXKzvWIOB2hdHIXuEcK0wW0En8//u+/nNA=
+cloud.google.com/go/dlp v1.17.0/go.mod h1:9LuCkaCRZxWZ6HyqkmV3/PW0gKIVKoUVNjf0yMKVqMs=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc=
+cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc=
+cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo=
+cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y=
+cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g=
+cloud.google.com/go/documentai v1.23.6/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g=
+cloud.google.com/go/documentai v1.23.7/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g=
+cloud.google.com/go/documentai v1.23.8/go.mod h1:Vd/y5PosxCpUHmwC+v9arZyeMfTqBR9VIwOwIqQYYfA=
+cloud.google.com/go/documentai v1.25.0/go.mod h1:ftLnzw5VcXkLItp6pw1mFic91tMRyfv6hHEY5br4KzY=
+cloud.google.com/go/documentai v1.26.0/go.mod h1:9YCBSv7aXXS88lpXQWcJbLB490u3WeQdGt5aGCkHGkA=
+cloud.google.com/go/documentai v1.26.1/go.mod h1:ljZB6yyT/aKZc9tCd0WGtBxIMWu8ZCEO6UiNwirqLU0=
+cloud.google.com/go/documentai v1.28.1/go.mod h1:dOMSDsZQoyguECOiT1XeR4PoJeALsXqlJjLIEk+QneY=
+cloud.google.com/go/documentai v1.29.0/go.mod h1:3Qt8PMt3S8W6w3VeoYFraaMS2GJRrXFnvkyn+GpB1n0=
+cloud.google.com/go/documentai v1.30.0/go.mod h1:3Qt8PMt3S8W6w3VeoYFraaMS2GJRrXFnvkyn+GpB1n0=
+cloud.google.com/go/documentai v1.30.1/go.mod h1:RohRpAfvuv3uk3WQtXPpgQ3YABvzacWnasyJQb6AAPk=
+cloud.google.com/go/documentai v1.30.3/go.mod h1:aMxiOouLr36hyahLhI3OwAcsy7plOTiXR/RmK+MHbSg=
+cloud.google.com/go/documentai v1.30.4/go.mod h1:1UqovvxIySy/sQwZcU1O+tm4qA/jnzAwzZLRIhFmhSk=
+cloud.google.com/go/documentai v1.30.5/go.mod h1:5ajlDvaPyl9tc+K/jZE8WtYIqSXqAD33Z1YAYIjfad4=
+cloud.google.com/go/documentai v1.31.0/go.mod h1:5ajlDvaPyl9tc+K/jZE8WtYIqSXqAD33Z1YAYIjfad4=
+cloud.google.com/go/documentai v1.32.0/go.mod h1:X8skObtXBvR31QF+jERAu4mOCpRiJBaqbMvB3FLnMsA=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE=
+cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I=
+cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU=
+cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY=
+cloud.google.com/go/domains v0.9.5/go.mod h1:dBzlxgepazdFhvG7u23XMhmMKBjrkoUNaw0A8AQB55Y=
+cloud.google.com/go/domains v0.9.6/go.mod h1:hYaeMxsDZED5wuUwYHXf89+aXHJvh41+os8skywd8D4=
+cloud.google.com/go/domains v0.9.7/go.mod h1:u/yVf3BgfPJW3QDZl51qTJcDXo9PLqnEIxfGmGgbHEc=
+cloud.google.com/go/domains v0.9.9/go.mod h1:/ewEPIaNmTrElY7u9BZPcLPnoP1NJJXGvISDDapwVNU=
+cloud.google.com/go/domains v0.9.10/go.mod h1:8yArcduQ2fDThBQlnDSwxrkGRgduW8KK2Y/nlL1IU2o=
+cloud.google.com/go/domains v0.9.11/go.mod h1:efo5552kUyxsXEz30+RaoIS2lR7tp3M/rhiYtKXkhkk=
+cloud.google.com/go/domains v0.9.12/go.mod h1:2YamnZleyO3y5zYV+oASWAUoiHBJ0ZmkEcO6MXs5x3c=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk=
+cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4=
+cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA=
+cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE=
+cloud.google.com/go/edgecontainer v1.1.5/go.mod h1:rgcjrba3DEDEQAidT4yuzaKWTbkTI5zAMu3yy6ZWS0M=
+cloud.google.com/go/edgecontainer v1.2.0/go.mod h1:bI2foS+2fRbzBmkIQtrxNzeVv3zZZy780PFF96CiVxA=
+cloud.google.com/go/edgecontainer v1.2.1/go.mod h1:OE2D0lbkmGDVYLCvpj8Y0M4a4K076QB7E2JupqOR/qU=
+cloud.google.com/go/edgecontainer v1.2.3/go.mod h1:gMKe2JfE0OT0WuCJArzIndAmMWDPCIYGSWYIpJ6M7oM=
+cloud.google.com/go/edgecontainer v1.2.4/go.mod h1:QiHvO/Xc/8388oPuYZfHn9BpKx3dz1jWSi8Oex5MX6w=
+cloud.google.com/go/edgecontainer v1.2.5/go.mod h1:OAb6tElD3F3oBujFAup14PKOs9B/lYobTb6LARmoACY=
+cloud.google.com/go/edgecontainer v1.2.6/go.mod h1:4jyHt4ytGLL8P0S3m6umOL8bJhTw4tVnDUcPQCGlNMM=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/errorreporting v0.3.1/go.mod h1:6xVQXU1UuntfAf+bVkFk6nld41+CPyF2NSPCyXE3Ztk=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4=
+cloud.google.com/go/essentialcontacts v1.6.3/go.mod h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo=
+cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM=
+cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM=
+cloud.google.com/go/essentialcontacts v1.6.6/go.mod h1:XbqHJGaiH0v2UvtuucfOzFXN+rpL/aU5BCZLn4DYl1Q=
+cloud.google.com/go/essentialcontacts v1.6.7/go.mod h1:5577lqt2pvnx9n4zP+eJSSWL02KLmQvjJPYknHdAbZg=
+cloud.google.com/go/essentialcontacts v1.6.8/go.mod h1:EHONVDSum2xxG2p+myyVda/FwwvGbY58ZYC4XqI/lDQ=
+cloud.google.com/go/essentialcontacts v1.6.10/go.mod h1:wQlXvEb/0hB0C0d4H6/90P8CiZcYewkvJ3VoUVFPi4E=
+cloud.google.com/go/essentialcontacts v1.6.11/go.mod h1:qpdkYSdPY4C69zprW20nKu+5DsED/Gwf1KtFHUSzrC0=
+cloud.google.com/go/essentialcontacts v1.6.12/go.mod h1:UGhWTIYewH8Ma4wDRJp8cMAHUCeAOCKsuwd6GLmmQLc=
+cloud.google.com/go/essentialcontacts v1.6.13/go.mod h1:52AB7Qmi6TBzA/lsSZER7oi4jR/pY0TXC0lNaaAyfA4=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y=
+cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM=
+cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg=
+cloud.google.com/go/eventarc v1.13.4/go.mod h1:zV5sFVoAa9orc/52Q+OuYUG9xL2IIZTbbuTHC6JSY8s=
+cloud.google.com/go/eventarc v1.13.5/go.mod h1:wrZcXnSOZk/AVbBYT5GpOa5QPuQFzSxiXKsKnynoPes=
+cloud.google.com/go/eventarc v1.13.6/go.mod h1:QReOaYnDNdjwAQQWNC7nfr63WnaKFUw7MSdQ9PXJYj0=
+cloud.google.com/go/eventarc v1.13.8/go.mod h1:Xq3SsMoOAn7RmacXgJO7kq818iRLFF0bVhH780qlmTs=
+cloud.google.com/go/eventarc v1.13.9/go.mod h1:Jn2EBCgvGXeqndphk0nUVgJm4ZJOhxx4yYcSasvNrh4=
+cloud.google.com/go/eventarc v1.13.10/go.mod h1:KlCcOMApmUaqOEZUpZRVH+p0nnnsY1HaJB26U4X5KXE=
+cloud.google.com/go/eventarc v1.13.11/go.mod h1:1PJ+icw2mJYgqUsICg7Cr8gzMw38f3THiSzVSNPFrNQ=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4=
+cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE=
+cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0=
+cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI=
+cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI=
+cloud.google.com/go/filestore v1.8.1/go.mod h1:MbN9KcaM47DRTIuLfQhJEsjaocVebNtNQhSLhKCF5GM=
+cloud.google.com/go/filestore v1.8.2/go.mod h1:QU7EKJP/xmCtzIhxNVLfv/k1QBKHXTbbj9512kwUT1I=
+cloud.google.com/go/filestore v1.8.3/go.mod h1:QTpkYpKBF6jlPRmJwhLqXfJQjVrQisplyb4e2CwfJWc=
+cloud.google.com/go/filestore v1.8.5/go.mod h1:o8KvHyl5V30kIdrPX6hE+RknscXCUFXWSxYsEWeFfRU=
+cloud.google.com/go/filestore v1.8.6/go.mod h1:ztH4U+aeH5vWtiyEd4+Dc56L2yRk7EIm0+PAR+9m5Jc=
+cloud.google.com/go/filestore v1.8.7/go.mod h1:dKfyH0YdPAKdYHqAR/bxZeil85Y5QmrEVQwIYuRjcXI=
+cloud.google.com/go/filestore v1.8.8/go.mod h1:gNT7bpDZSOFWCnRirQw1IehZtA7blbzkO3Q8VQfkeZ0=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
+cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
+cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8=
+cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ=
+cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk=
+cloud.google.com/go/firestore v1.16.0/go.mod h1:+22v/7p+WNBSQwdSwP57vz47aZiY+HrDkrOsJNhk7rg=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE=
+cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE=
+cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug=
+cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I=
+cloud.google.com/go/functions v1.16.0/go.mod h1:nbNpfAG7SG7Duw/o1iZ6ohvL7mc6MapWQVpqtM29n8k=
+cloud.google.com/go/functions v1.16.1/go.mod h1:WcQy3bwDw6KblOuj+khLyQbsi8aupUrZUrPEKTtVaSQ=
+cloud.google.com/go/functions v1.16.2/go.mod h1:+gMvV5E3nMb9EPqX6XwRb646jTyVz8q4yk3DD6xxHpg=
+cloud.google.com/go/functions v1.16.4/go.mod h1:uDp5MbH0kCtXe3uBluq3Zi7bEDuHqcn60mAHxUsNezI=
+cloud.google.com/go/functions v1.16.5/go.mod h1:ds5f+dyMN4kCkTWTLpQl8wMi0sLRuJWrQaWr5eFlUnQ=
+cloud.google.com/go/functions v1.16.6/go.mod h1:wOzZakhMueNQaBUJdf0yjsJIe0GBRu+ZTvdSTzqHLs0=
+cloud.google.com/go/functions v1.18.0/go.mod h1:r8uxxI35hdP2slfTjGJvx04NRy8sP/EXUMZ0NYfBd+w=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU=
+cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU=
+cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk=
+cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I=
+cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI=
+cloud.google.com/go/gkebackup v1.3.5/go.mod h1:KJ77KkNN7Wm1LdMopOelV6OodM01pMuK2/5Zt1t4Tvc=
+cloud.google.com/go/gkebackup v1.4.0/go.mod h1:FpsE7Qcio7maQ5bPMvacN+qoXTPWrxHe4fm44RWa67U=
+cloud.google.com/go/gkebackup v1.5.0/go.mod h1:eLaf/+n8jEmIvOvDriGjo99SN7wRvVadoqzbZu0WzEw=
+cloud.google.com/go/gkebackup v1.5.2/go.mod h1:ZuWJKacdXtjiO8ry9RrdT57gvcsU7c7/FTqqwjdNUjk=
+cloud.google.com/go/gkebackup v1.5.3/go.mod h1:fzWJXO5v0AzcC3J5KgCTpEcB0uvcC+e0YqIRVYQR4sE=
+cloud.google.com/go/gkebackup v1.5.4/go.mod h1:V+llvHlRD0bCyrkYaAMJX+CHralceQcaOWjNQs8/Ymw=
+cloud.google.com/go/gkebackup v1.5.5/go.mod h1:C/XZ2LoG+V97xGc18oCPniO754E0iHt0OXqKatawBMM=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw=
+cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY=
+cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo=
+cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw=
+cloud.google.com/go/gkeconnect v0.8.5/go.mod h1:LC/rS7+CuJ5fgIbXv8tCD/mdfnlAadTaUufgOkmijuk=
+cloud.google.com/go/gkeconnect v0.8.6/go.mod h1:4/o9sXLLsMl2Rw2AyXjtVET0RMk4phdFJuBX45jRRHc=
+cloud.google.com/go/gkeconnect v0.8.7/go.mod h1:iUH1jgQpTyNFMK5LgXEq2o0beIJ2p7KKUUFerkf/eGc=
+cloud.google.com/go/gkeconnect v0.8.9/go.mod h1:gl758q5FLXewQZIsxQ7vHyYmLcGBuubvQO6J3yFDh08=
+cloud.google.com/go/gkeconnect v0.8.10/go.mod h1:2r9mjewv4bAEg0VXNqc7uJA2vWuDHy/44IzstIikFH8=
+cloud.google.com/go/gkeconnect v0.8.11/go.mod h1:ejHv5ehbceIglu1GsMwlH0nZpTftjxEY6DX7tvaM8gA=
+cloud.google.com/go/gkeconnect v0.8.12/go.mod h1:+SpnnnUx4Xs/mWBJbqC7Mlu9Vv7riQlHSDS1T1ek2+U=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY=
+cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY=
+cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8=
+cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc=
+cloud.google.com/go/gkehub v0.14.5/go.mod h1:6bzqxM+a+vEH/h8W8ec4OJl4r36laxTs3A/fMNHJ0wA=
+cloud.google.com/go/gkehub v0.14.6/go.mod h1:SD3/ihO+7/vStQEwYA1S/J9mouohy7BfhM/gGjAmJl0=
+cloud.google.com/go/gkehub v0.14.7/go.mod h1:NLORJVTQeCdxyAjDgUwUp0A6BLEaNLq84mCiulsM4OE=
+cloud.google.com/go/gkehub v0.14.9/go.mod h1:W2rDU2n2xgMpf3/BqpT6ffUX/I8yez87rrW/iGRz6Kk=
+cloud.google.com/go/gkehub v0.14.10/go.mod h1:+bqT9oyCDQG2Dc2pUJKYVNJGvrKgIfm7c+hk9IlDzJU=
+cloud.google.com/go/gkehub v0.14.11/go.mod h1:CsmDJ4qbBnSPkoBltEubK6qGOjG0xNfeeT5jI5gCnRQ=
+cloud.google.com/go/gkehub v0.14.12/go.mod h1:CNYNBCqjIkE9L70gzbRxZOsc++Wcp2oCLkfuytOFqRM=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8=
+cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo=
+cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0=
+cloud.google.com/go/gkemulticloud v1.1.0/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0=
+cloud.google.com/go/gkemulticloud v1.1.1/go.mod h1:C+a4vcHlWeEIf45IB5FFR5XGjTeYhF83+AYIpTy4i2Q=
+cloud.google.com/go/gkemulticloud v1.1.2/go.mod h1:QhdIrilhqieDJJzOyfMPBqcfDVntENYGwqSeX2ZuIDE=
+cloud.google.com/go/gkemulticloud v1.2.0/go.mod h1:iN5wBxTLPR6VTBWpkUsOP2zuPOLqZ/KbgG1bZir1Cng=
+cloud.google.com/go/gkemulticloud v1.2.2/go.mod h1:VMsMYDKpUVYNrhese31TVJMVXPLEtFT/AnIarqlcwVo=
+cloud.google.com/go/gkemulticloud v1.2.3/go.mod h1:CR97Vcd9XdDLZQtMPfXtbFWRxfIFuO9K6q7oF6+moco=
+cloud.google.com/go/gkemulticloud v1.2.4/go.mod h1:PjTtoKLQpIRztrL+eKQw8030/S4c7rx/WvHydDJlpGE=
+cloud.google.com/go/gkemulticloud v1.2.5/go.mod h1:zVRNlO7/jFXmvrkBd+UfhI2T7ZBb+N3b3lt/3K60uS0=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
+cloud.google.com/go/grafeas v0.3.4/go.mod h1:A5m316hcG+AulafjAbPKXBO/+I5itU4LOdKO2R/uDIc=
+cloud.google.com/go/grafeas v0.3.5/go.mod h1:y54iTBcI+lgUdI+kAPKb8jtPqeTkA2dsYzWSrQtpc5s=
+cloud.google.com/go/grafeas v0.3.6/go.mod h1:to6ECAPgRO2xeqD8ISXHc70nObJuaKZThreQOjeOH3o=
+cloud.google.com/go/grafeas v0.3.9/go.mod h1:j8hBcywIqtJ3/3QP9yYB/LqjLWBM9dXumBa+xplvyG0=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY=
+cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU=
+cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48=
+cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE=
+cloud.google.com/go/gsuiteaddons v1.6.5/go.mod h1:Lo4P2IvO8uZ9W+RaC6s1JVxo42vgy+TX5a6hfBZ0ubs=
+cloud.google.com/go/gsuiteaddons v1.6.6/go.mod h1:JmAp1/ojGgHtSe5d6ZPkOwJbYP7An7DRBkhSJ1aer8I=
+cloud.google.com/go/gsuiteaddons v1.6.7/go.mod h1:u+sGBvr07OKNnOnQiB/Co1q4U2cjo50ERQwvnlcpNis=
+cloud.google.com/go/gsuiteaddons v1.6.9/go.mod h1:qITZZoLzQhMQ6Re+izKEvz4C+M1AP13S+XuEpS26824=
+cloud.google.com/go/gsuiteaddons v1.6.10/go.mod h1:daIpNyqugkch134oS116DXGEVrLUt0kSdqvgi0U1DD8=
+cloud.google.com/go/gsuiteaddons v1.6.11/go.mod h1:U7mk5PLBzDpHhgHv5aJkuvLp9RQzZFpa8hgWAB+xVIk=
+cloud.google.com/go/gsuiteaddons v1.6.12/go.mod h1:hqTWzMXCgS/BPuyiWHzDBZC4K3+a9lcJWBUR+i+6D7A=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.4.0/go.mod h1:cbaZxyScUhxl7ZAkNWiALgihfP75wS/fUsVNaa1r3vA=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8=
+cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE=
+cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8=
+cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
+cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
+cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps=
+cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ=
+cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg=
+cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ=
+cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY=
+cloud.google.com/go/iap v1.9.1/go.mod h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q=
+cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI=
+cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw=
+cloud.google.com/go/iap v1.9.4/go.mod h1:vO4mSq0xNf/Pu6E5paORLASBwEmphXEjgCFg7aeNu1w=
+cloud.google.com/go/iap v1.9.5/go.mod h1:4zaAOm66mId/50vqRF7ZPDeCjvHQJSVAXD/mkUWo4Zk=
+cloud.google.com/go/iap v1.9.6/go.mod h1:YiK+tbhDszhaVifvzt2zTEF2ch9duHtp6xzxj9a0sQk=
+cloud.google.com/go/iap v1.9.8/go.mod h1:jQzSbtpYRbBoMdOINr/OqUxBY9rhyqLx04utTCmJ6oo=
+cloud.google.com/go/iap v1.9.9/go.mod h1:7I7ftlLPPU8du0E8jW3koaYkNcX1NLqSDU9jQFRwF04=
+cloud.google.com/go/iap v1.9.10/go.mod h1:pO0FEirrhMOT1H0WVwpD5dD9r3oBhvsunyBQtNXzzc0=
+cloud.google.com/go/iap v1.9.11/go.mod h1:UcvTLqySIc8C3Dw3JPZ7QihzzxVQJ7/KUOL9MjxiPZk=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw=
+cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk=
+cloud.google.com/go/ids v1.4.3/go.mod h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU=
+cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI=
+cloud.google.com/go/ids v1.4.5/go.mod h1:p0ZnyzjMWxww6d2DvMGnFwCsSxDJM666Iir1bK1UuBo=
+cloud.google.com/go/ids v1.4.6/go.mod h1:EJ1554UwEEs8HCHVnXPGn21WouM0uFvoq8UvEEr2ng4=
+cloud.google.com/go/ids v1.4.7/go.mod h1:yUkDC71u73lJoTaoONy0dsA0T7foekvg6ZRg9IJL0AA=
+cloud.google.com/go/ids v1.4.9/go.mod h1:1pL+mhlvtUNphwBSK91yO8NoTVQYwOpqim1anIVBwbM=
+cloud.google.com/go/ids v1.4.10/go.mod h1:438ouAjmw7c4/3Q+KbQxuJTU3jek5xo6cVH7EduiKXs=
+cloud.google.com/go/ids v1.4.11/go.mod h1:+ZKqWELpJm8WcRRsSvKZWUdkriu4A3XsLLzToTv3418=
+cloud.google.com/go/ids v1.4.12/go.mod h1:SH2yjlk9fKWrRgob/E0Gd1wM+VFztfTdR+LaJRDMiPw=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk=
+cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg=
+cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I=
+cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk=
+cloud.google.com/go/iot v1.7.5/go.mod h1:nq3/sqTz3HGaWJi1xNiX7F41ThOzpud67vwk0YsSsqs=
+cloud.google.com/go/iot v1.7.6/go.mod h1:IMhFVfRGn5OqrDJ9Obu0rC5VIr2+SvSyUxQPHkXYuW0=
+cloud.google.com/go/iot v1.7.7/go.mod h1:tr0bCOSPXtsg64TwwZ/1x+ReTWKlQRVXbM+DnrE54yM=
+cloud.google.com/go/iot v1.7.9/go.mod h1:1fi6x4CexbygNgRPn+tcxCjOZFTl+4G6Adbo6sLPR7c=
+cloud.google.com/go/iot v1.7.10/go.mod h1:rVBZ3srfCH4yPr2CPkxu3tB/c0avx0KV9K68zVNAh4Q=
+cloud.google.com/go/iot v1.7.11/go.mod h1:0vZJOqFy9kVLbUXwTP95e0dWHakfR4u5IWqsKMGIfHk=
+cloud.google.com/go/iot v1.7.12/go.mod h1:8ntlg5OWnVodAsbs0KDLY58tKEroy+CYciDX/ONxpl4=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM=
+cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w=
+cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ=
+cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc=
+cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
+cloud.google.com/go/kms v1.15.6/go.mod h1:yF75jttnIdHfGBoE51AKsD/Yqf+/jICzB9v1s1acsms=
+cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
+cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
+cloud.google.com/go/kms v1.17.1/go.mod h1:DCMnCF/apA6fZk5Cj4XsD979OyHAqFasPuA5Sd0kGlQ=
+cloud.google.com/go/kms v1.18.0/go.mod h1:DyRBeWD/pYBMeyiaXFa/DGNyxMDL3TslIKb8o/JkLkw=
+cloud.google.com/go/kms v1.18.2/go.mod h1:YFz1LYrnGsXARuRePL729oINmN5J/5e7nYijgvfiIeY=
+cloud.google.com/go/kms v1.18.3/go.mod h1:y/Lcf6fyhbdn7MrG1VaDqXxM8rhOBc5rWcWAhcvZjQU=
+cloud.google.com/go/kms v1.18.4/go.mod h1:SG1bgQ3UWW6/KdPo9uuJnzELXY5YTTMJtDYvajiQ22g=
+cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0=
+cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ=
+cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U=
+cloud.google.com/go/language v1.12.1/go.mod h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4=
+cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc=
+cloud.google.com/go/language v1.12.3/go.mod h1:evFX9wECX6mksEva8RbRnr/4wi/vKGYnAJrTRXU8+f8=
+cloud.google.com/go/language v1.12.4/go.mod h1:Us0INRv/CEbrk2s8IBZcHaZjSBmK+bRlX4FUYZrD4I8=
+cloud.google.com/go/language v1.12.5/go.mod h1:w/6a7+Rhg6Bc2Uzw6thRdKKNjnOzfKTJuxzD0JZZ0nM=
+cloud.google.com/go/language v1.12.7/go.mod h1:4s/11zABvI/gv+li/+ICe+cErIaN9hYmilf9wrc5Py0=
+cloud.google.com/go/language v1.12.8/go.mod h1:3706JYCNJKvNXZZzcf7PGUMR2IuEYXQ0o7KqyOLqw+s=
+cloud.google.com/go/language v1.12.9/go.mod h1:B9FbD17g1EkilctNGUDAdSrBHiFOlKNErLljO7jplDU=
+cloud.google.com/go/language v1.13.0/go.mod h1:B9FbD17g1EkilctNGUDAdSrBHiFOlKNErLljO7jplDU=
+cloud.google.com/go/language v1.13.1/go.mod h1:PY/DAdVW0p2MWl2Lut31AJddEmQBBXMnPUM8nkl/WfA=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc=
+cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA=
+cloud.google.com/go/lifesciences v0.9.3/go.mod h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM=
+cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA=
+cloud.google.com/go/lifesciences v0.9.5/go.mod h1:OdBm0n7C0Osh5yZB7j9BXyrMnTRGBJIZonUMxo5CzPw=
+cloud.google.com/go/lifesciences v0.9.6/go.mod h1:BkNWYU0tPZbwpy76RE4biZajWFe6NvWwEAaIlNiKXdE=
+cloud.google.com/go/lifesciences v0.9.7/go.mod h1:FQ713PhjAOHqUVnuwsCe1KPi9oAdaTfh58h1xPiW13g=
+cloud.google.com/go/lifesciences v0.9.9/go.mod h1:4c8eLVKz7/FPw6lvoHx2/JQX1rVM8+LlYmBp8h5H3MQ=
+cloud.google.com/go/lifesciences v0.9.10/go.mod h1:zm5Y46HXN/ZoVdQ8HhXJvXG+m4De1HoJye62r/DFXoU=
+cloud.google.com/go/lifesciences v0.9.11/go.mod h1:NMxu++FYdv55TxOBEvLIhiAvah8acQwXsz79i9l9/RY=
+cloud.google.com/go/lifesciences v0.9.12/go.mod h1:si0In2nxVPtZnSoDNlEgSV4BJWxxlkdgKh+LXPYMf4w=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI=
+cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE=
+cloud.google.com/go/logging v1.10.0/go.mod h1:EHOwcxlltJrYGqMGfghSet736KR3hX1MAj614mrMk9I=
+cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ=
+cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc=
+cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
+cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs=
+cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y=
+cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI=
+cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s=
+cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA=
+cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
+cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c=
+cloud.google.com/go/longrunning v0.5.10/go.mod h1:tljz5guTr5oc/qhlUjBlk7UAIFMOGuPNxkNDZXlLics=
+cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4=
+cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
+cloud.google.com/go/managedidentities v1.6.2/go.mod h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8=
+cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4=
+cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM=
+cloud.google.com/go/managedidentities v1.6.5/go.mod h1:fkFI2PwwyRQbjLxlm5bQ8SjtObFMW3ChBGNqaMcgZjI=
+cloud.google.com/go/managedidentities v1.6.6/go.mod h1:0+0qF22qx8o6eeaZ/Ku7HmHv9soBHD1piyNHgAP+c20=
+cloud.google.com/go/managedidentities v1.6.7/go.mod h1:UzslJgHnc6luoyx2JV19cTCi2Fni/7UtlcLeSYRzTV8=
+cloud.google.com/go/managedidentities v1.6.9/go.mod h1:R7+78iH2j/SCTInutWINxGxEY0PH5rpbWt6uRq0Tn+Y=
+cloud.google.com/go/managedidentities v1.6.10/go.mod h1:Dg+K/AgKJtOyDjrrMGh4wFrEmtlUUcoEtDdC/WsZxw4=
+cloud.google.com/go/managedidentities v1.6.11/go.mod h1:df+8oZ1D4Eri+NrcpuiR5Hd6MGgiMqn0ZCzNmBYPS0A=
+cloud.google.com/go/managedidentities v1.6.12/go.mod h1:7KrCfXlxPw85nhlEYF3o5oLC8RtQakMAIGKNiNN3OAg=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY=
+cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4=
+cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18=
+cloud.google.com/go/maps v1.6.2/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18=
+cloud.google.com/go/maps v1.6.3/go.mod h1:VGAn809ADswi1ASofL5lveOHPnE6Rk/SFTTBx1yuOLw=
+cloud.google.com/go/maps v1.6.4/go.mod h1:rhjqRy8NWmDJ53saCfsXQ0LKwBHfi6OSh5wkq6BaMhI=
+cloud.google.com/go/maps v1.7.0/go.mod h1:LqhDUCNSDAH6MvvubxDm7lPfHPhn6MduIaGCZ4iY1Ig=
+cloud.google.com/go/maps v1.7.1/go.mod h1:fri+i4pO41ZUZ/Nrz3U9hNEtXsv5SROMFP2AwAHFSX8=
+cloud.google.com/go/maps v1.10.0/go.mod h1:lbl3+NkLJ88H4qv3rO8KWOHOYhJiOwsqHOAXMHb9seA=
+cloud.google.com/go/maps v1.11.0/go.mod h1:XcSsd8lg4ZhLPCtJ2YHcu/xLVePBzZOlI7GmR2cRCws=
+cloud.google.com/go/maps v1.11.1/go.mod h1:XcSsd8lg4ZhLPCtJ2YHcu/xLVePBzZOlI7GmR2cRCws=
+cloud.google.com/go/maps v1.11.3/go.mod h1:4iKNrUzFISQ4RoiWCqIFEAAVtgKb2oQ09AVx8GheOUg=
+cloud.google.com/go/maps v1.11.4/go.mod h1:RQ2Vv/f2HKGlvCtj8xyJp8gJbVqh/CWy0xR2Nfe8c0s=
+cloud.google.com/go/maps v1.11.5/go.mod h1:MOS/NN0L6b7Kumr8bLux9XTpd8+D54DYxBMUjq+XfXs=
+cloud.google.com/go/maps v1.11.6/go.mod h1:MOS/NN0L6b7Kumr8bLux9XTpd8+D54DYxBMUjq+XfXs=
+cloud.google.com/go/maps v1.11.7/go.mod h1:CEGHM/Q0epp0oWFO7kiEk8oDGUUhjd1sj4Rcd/4iwGU=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
+cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8=
+cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y=
+cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4=
+cloud.google.com/go/mediatranslation v0.8.5/go.mod h1:y7kTHYIPCIfgyLbKncgqouXJtLsU+26hZhHEEy80fSs=
+cloud.google.com/go/mediatranslation v0.8.6/go.mod h1:zI2ZvRRtrGimH572cwYtmq8t1elKbUGVVw4MAXIC4UQ=
+cloud.google.com/go/mediatranslation v0.8.7/go.mod h1:6eJbPj1QJwiCP8R4K413qMx6ZHZJUi9QFpApqY88xWU=
+cloud.google.com/go/mediatranslation v0.8.9/go.mod h1:3MjXTUsEzrMC9My6e9o7TOmgIUGlyrkVAxjzcmxBUdU=
+cloud.google.com/go/mediatranslation v0.8.10/go.mod h1:sCTNVpO4Yh9LbkjelsGakWBi93u9THKfKQLSGSLS7rA=
+cloud.google.com/go/mediatranslation v0.8.11/go.mod h1:3sNEm0fx61eHk7rfzBzrljVV9XKr931xI3OFacQBVFg=
+cloud.google.com/go/mediatranslation v0.8.12/go.mod h1:owrIOMto4hzsoqkZe95ePEiMJv4JF7/tgEgWuHC+t40=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
+cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A=
+cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo=
+cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0=
+cloud.google.com/go/memcache v1.10.5/go.mod h1:/FcblbNd0FdMsx4natdj+2GWzTq+cjZvMa1I+9QsuMA=
+cloud.google.com/go/memcache v1.10.6/go.mod h1:4elGf6MwGszZCM0Yopp15qmBoo+Y8M7wg7QRpSM8pzA=
+cloud.google.com/go/memcache v1.10.7/go.mod h1:SrU6+QBhvXJV0TA59+B3oCHtLkPx37eqdKmRUlmSE1k=
+cloud.google.com/go/memcache v1.10.9/go.mod h1:06evGxt9E1Mf/tYsXJNdXuRj5qzspVd0Tt18kXYDD5c=
+cloud.google.com/go/memcache v1.10.10/go.mod h1:UXnN6UYNoNM6RTExZ7/iW9c2mAaeJjy7R7uaplNRmIc=
+cloud.google.com/go/memcache v1.10.11/go.mod h1:ubJ7Gfz/xQawQY5WO5pht4Q0dhzXBFeEszAeEJnwBHU=
+cloud.google.com/go/memcache v1.10.12/go.mod h1:OfG2zgIXVTNJy2UKDF4o4irKxBqTx9RMZhGKJ/hLJUI=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk=
+cloud.google.com/go/metastore v1.13.1/go.mod h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU=
+cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA=
+cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE=
+cloud.google.com/go/metastore v1.13.4/go.mod h1:FMv9bvPInEfX9Ac1cVcRXp8EBBQnBcqH6gz3KvJ9BAE=
+cloud.google.com/go/metastore v1.13.5/go.mod h1:dmsJzIdQcJrpmRGhEaii3EhVq1JuhI0bxSBoy7A8hcQ=
+cloud.google.com/go/metastore v1.13.6/go.mod h1:OBCVMCP7X9vA4KKD+5J4Q3d+tiyKxalQZnksQMq5MKY=
+cloud.google.com/go/metastore v1.13.8/go.mod h1:2uLJBAXn5EDYJx9r7mZtxZifCKpakZUCvNfzI7ejUiE=
+cloud.google.com/go/metastore v1.13.9/go.mod h1:KgRseDRcS7Um/mNLbRHJjXZQrK8MqlGSyEga7T/Vs1A=
+cloud.google.com/go/metastore v1.13.10/go.mod h1:RPhMnBxUmTLT1fN7fNbPqtH5EoGHueDxubmJ1R1yT84=
+cloud.google.com/go/metastore v1.13.11/go.mod h1:aeP+V0Xs3SLqu4mrQWRyuSg5+fdyPq+kdu1xclnR8y8=
+cloud.google.com/go/monitoring v0.1.0/go.mod h1:Hpm3XfzJv+UTiXzCG5Ffp0wijzHTC7Cv4eR7o3x/fEE=
+cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
+cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY=
+cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4=
+cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc=
+cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw=
+cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw=
+cloud.google.com/go/monitoring v1.17.1/go.mod h1:SJzPMakCF0GHOuKEH/r4hxVKF04zl+cRPQyc3d/fqII=
+cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg=
+cloud.google.com/go/monitoring v1.18.1/go.mod h1:52hTzJ5XOUMRm7jYi7928aEdVxBEmGwA0EjNJXIBvt8=
+cloud.google.com/go/monitoring v1.19.0/go.mod h1:25IeMR5cQ5BoZ8j1eogHE5VPJLlReQ7zFp5OiLgiGZw=
+cloud.google.com/go/monitoring v1.20.1/go.mod h1:FYSe/brgfuaXiEzOQFhTjsEsJv+WePyK71X7Y8qo6uQ=
+cloud.google.com/go/monitoring v1.20.2/go.mod h1:36rpg/7fdQ7NX5pG5x1FA7cXTVXusOp6Zg9r9e1+oek=
+cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU=
+cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
+cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk=
+cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk=
+cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug=
+cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo=
+cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek=
+cloud.google.com/go/networkconnectivity v1.14.4/go.mod h1:PU12q++/IMnDJAB+3r+tJtuCXCfwfN+C6Niyj6ji1Po=
+cloud.google.com/go/networkconnectivity v1.14.5/go.mod h1:Wy28mxRApI1uVwA9iHaYYxGNe74cVnSP311bCUJEpBc=
+cloud.google.com/go/networkconnectivity v1.14.6/go.mod h1:/azB7+oCSmyBs74Z26EogZ2N3UcXxdCHkCPcz8G32bU=
+cloud.google.com/go/networkconnectivity v1.14.8/go.mod h1:QQ/XTMk7U5fzv1cVNUCQJEjpkVEE+nYOK7mg3hVTuiI=
+cloud.google.com/go/networkconnectivity v1.14.9/go.mod h1:J1JgZDeSi/elFfOSLkMoY9REuGhoNXqOFuI0cfyS6WY=
+cloud.google.com/go/networkconnectivity v1.14.10/go.mod h1:f7ZbGl4CV08DDb7lw+NmMXQTKKjMhgCEEwFbEukWuOY=
+cloud.google.com/go/networkconnectivity v1.14.11/go.mod h1:XRA6nT7ygTN09gAtCRsFhbqn3u7/9LIUn6S+5G4fs50=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
+cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw=
+cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI=
+cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw=
+cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU=
+cloud.google.com/go/networkmanagement v1.9.4/go.mod h1:daWJAl0KTFytFL7ar33I6R/oNBH8eEOX/rBNHrC/8TA=
+cloud.google.com/go/networkmanagement v1.13.0/go.mod h1:LcwkOGJmWtjM4yZGKfN1kSoEj/OLGFpZEQefWofHFKI=
+cloud.google.com/go/networkmanagement v1.13.2/go.mod h1:24VrV/5HFIOXMEtVQEUoB4m/w8UWvUPAYjfnYZcBc4c=
+cloud.google.com/go/networkmanagement v1.13.4/go.mod h1:dGTeJfDPQv0yGDt6gncj4XAPwxktjpCn5ZxQajStW8g=
+cloud.google.com/go/networkmanagement v1.13.5/go.mod h1:znPuYKLqWJLzLI9feH6ex+Mq+6VlexfiUR8F6sFOtGo=
+cloud.google.com/go/networkmanagement v1.13.6/go.mod h1:WXBijOnX90IFb6sberjnGrVtZbgDNcPDUYOlGXmG8+4=
+cloud.google.com/go/networkmanagement v1.13.7/go.mod h1:foi1eLe3Ayydrr63O3ViMwG1AGS3/BxRSmXpAqMFhkY=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
+cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI=
+cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0=
+cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w=
+cloud.google.com/go/networksecurity v0.9.5/go.mod h1:KNkjH/RsylSGyyZ8wXpue8xpCEK+bTtvof8SBfIhMG8=
+cloud.google.com/go/networksecurity v0.9.6/go.mod h1:SZB02ji/2uittsqoAXu9PBqGG9nF9PuxPgtezQfihSA=
+cloud.google.com/go/networksecurity v0.9.7/go.mod h1:aB6UiPnh/l32+TRvgTeOxVRVAHAFFqvK+ll3idU5BoY=
+cloud.google.com/go/networksecurity v0.9.9/go.mod h1:aLS+6sLeZkMhLx9ntTMJG4qWHdvDPctqMOb6ggz9m5s=
+cloud.google.com/go/networksecurity v0.9.10/go.mod h1:pHy4lna09asqVhLwHVUXn92KGlM5oj1iSLFUwqqGZ2g=
+cloud.google.com/go/networksecurity v0.9.11/go.mod h1:4xbpOqCwplmFgymAjPFM6ZIplVC6+eQ4m7sIiEq9oJA=
+cloud.google.com/go/networksecurity v0.9.12/go.mod h1:Id0HGMKFJemLolvsoECda71vU2T9JByGPYct6LgMxrw=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
+cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k=
+cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A=
+cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ=
+cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70=
+cloud.google.com/go/notebooks v1.11.3/go.mod h1:0wQyI2dQC3AZyQqWnRsp+yA+kY4gC7ZIVP4Qg3AQcgo=
+cloud.google.com/go/notebooks v1.11.4/go.mod h1:vtqPiCQMv++HOfQMzyE46f4auCB843rf20KEQW2zZKM=
+cloud.google.com/go/notebooks v1.11.5/go.mod h1:pz6P8l2TvhWqAW3sysIsS0g2IUJKOzEklsjWJfi8sd4=
+cloud.google.com/go/notebooks v1.11.7/go.mod h1:lTjloYceMboZanBFC/JSZYet/K+JuO0mLAXVVhb/6bQ=
+cloud.google.com/go/notebooks v1.11.8/go.mod h1:jkRKhXWSXtzKtoPd9QeDzHrMPTYxf4l1rQP1/+6iR9g=
+cloud.google.com/go/notebooks v1.11.9/go.mod h1:JmnRX0eLgHRJiyxw8HOgumW9iRajImZxr7r75U16uXw=
+cloud.google.com/go/notebooks v1.11.10/go.mod h1:2d3Lwdm5VTxZzxY94V8TffNBk0FBnORieiVBeN+n9QQ=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
+cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU=
+cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8=
+cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo=
+cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY=
+cloud.google.com/go/optimization v1.6.3/go.mod h1:8ve3svp3W6NFcAEFr4SfJxrldzhUl4VMUJmhrqVKtYA=
+cloud.google.com/go/optimization v1.6.4/go.mod h1:AfXfr2vlBXCF9RPh/Jpj46FhXR5JiWlyHA0rGI5Eu5M=
+cloud.google.com/go/optimization v1.6.5/go.mod h1:eiJjNge1NqqLYyY75AtIGeQWKO0cvzD1ct/moCFaP2Q=
+cloud.google.com/go/optimization v1.6.7/go.mod h1:FREForRqqjTsJbElYyWSgb54WXUzTMTRyjVT+Tl80v8=
+cloud.google.com/go/optimization v1.6.8/go.mod h1:d/uDAEVA0JYzWO3bCcuC6nnZKTjrSWhNkCTFUOV39g0=
+cloud.google.com/go/optimization v1.6.9/go.mod h1:mcvkDy0p4s5k7iSaiKrwwpN0IkteHhGmuW5rP9nXA5M=
+cloud.google.com/go/optimization v1.6.10/go.mod h1:qWX4Kv90NeBgPfoRwyMbISe8M7Ql1LAOFPNFuOqIvUI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
+cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0=
+cloud.google.com/go/orchestration v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs=
+cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI=
+cloud.google.com/go/orchestration v1.8.5/go.mod h1:C1J7HesE96Ba8/hZ71ISTV2UAat0bwN+pi85ky38Yq8=
+cloud.google.com/go/orchestration v1.9.0/go.mod h1:sq28tiaY9crFRQlxIcYUupwq3Tr5fLctHmDlc+3WE9c=
+cloud.google.com/go/orchestration v1.9.1/go.mod h1:yLPB2q/tdlEheIiZS7DAPKHeXdf4qNTlKAJCp/2EzXA=
+cloud.google.com/go/orchestration v1.9.2/go.mod h1:8bGNigqCQb/O1kK7PeStSNlyi58rQvZqDiuXT9KAcbg=
+cloud.google.com/go/orchestration v1.9.4/go.mod h1:jk5hczI8Tciq+WCkN32GpjWJs67GSmAA0XHFUlELJLw=
+cloud.google.com/go/orchestration v1.9.5/go.mod h1:64czIksdxj1B3pu0JXHVqwSmCZEoJfmuJWssWRXrVsc=
+cloud.google.com/go/orchestration v1.9.6/go.mod h1:gQvdIsHESZJigimnbUA8XLbYeFlSg/z+A7ppds5JULg=
+cloud.google.com/go/orchestration v1.9.7/go.mod h1:Mgtuci4LszRSzKkQucdWvdhTyG+QB4+3ZpsZ4sqalrQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M=
+cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
+cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o=
+cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM=
+cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI=
+cloud.google.com/go/orgpolicy v1.12.0/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI=
+cloud.google.com/go/orgpolicy v1.12.1/go.mod h1:aibX78RDl5pcK3jA8ysDQCFkVxLj3aOQqrbBaUL2V5I=
+cloud.google.com/go/orgpolicy v1.12.2/go.mod h1:XycP+uWN8Fev47r1XibYjOgZod8SjXQtZGsO2I8KXX8=
+cloud.google.com/go/orgpolicy v1.12.3/go.mod h1:6BOgIgFjWfJzTsVcib/4QNHOAeOjCdaBj69aJVs//MA=
+cloud.google.com/go/orgpolicy v1.12.5/go.mod h1:f778/jOHKp6cP6NbbQgjy4SDfQf6BoVGiSWdxky3ONQ=
+cloud.google.com/go/orgpolicy v1.12.6/go.mod h1:yEkOiKK4w2tBzxLFvjO9kqoIRBXoF29vFeNqhGiifpE=
+cloud.google.com/go/orgpolicy v1.12.7/go.mod h1:Os3GlUFRPf1UxOHTup5b70BARnhHeQNNVNZzJXPbWYI=
+cloud.google.com/go/orgpolicy v1.12.8/go.mod h1:WHkLGqHILPnMgJ4UTdag6YgztVIgWS+T5T6tywH3cSM=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc=
+cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
+cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0=
+cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM=
+cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA=
+cloud.google.com/go/osconfig v1.12.5/go.mod h1:D9QFdxzfjgw3h/+ZaAb5NypM8bhOMqBzgmbhzWViiW8=
+cloud.google.com/go/osconfig v1.12.6/go.mod h1:2dcXGl5qNbKo6Hjsnqbt5t6H2GX7UCAaPjF6BwDlFq8=
+cloud.google.com/go/osconfig v1.12.7/go.mod h1:ID7Lbqr0fiihKMwAOoPomWRqsZYKWxfiuafNZ9j1Y1M=
+cloud.google.com/go/osconfig v1.13.0/go.mod h1:tlACnQi1rtSLnHRYzfw9SH9zXs0M7S1jqiW2EOCn2Y0=
+cloud.google.com/go/osconfig v1.13.1/go.mod h1:3EcPSKozSco5jbdv2CZDojH0RVcRKvOdPrkrl+iHwuI=
+cloud.google.com/go/osconfig v1.13.2/go.mod h1:eupylkWQJCwSIEMkpVR4LqpgKkQi0mD4m1DzNCgpQso=
+cloud.google.com/go/osconfig v1.13.3/go.mod h1:gIFyyriC1ANob8SnpwrQ6jjNroRwItoBOYfqiG3LkUU=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
+cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE=
+cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg=
+cloud.google.com/go/oslogin v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU=
+cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY=
+cloud.google.com/go/oslogin v1.13.0/go.mod h1:xPJqLwpTZ90LSE5IL1/svko+6c5avZLluiyylMb/sRA=
+cloud.google.com/go/oslogin v1.13.1/go.mod h1:vS8Sr/jR7QvPWpCjNqy6LYZr5Zs1e8ZGW/KPn9gmhws=
+cloud.google.com/go/oslogin v1.13.2/go.mod h1:U8Euw2VeOEhJ/NE/0Q8xpInxi0J1oo2zdRNNVA/ba7U=
+cloud.google.com/go/oslogin v1.13.3/go.mod h1:WW7Rs1OJQ1iSUckZDilvNBSNPE8on740zF+4ZDR4o8U=
+cloud.google.com/go/oslogin v1.13.5/go.mod h1:V+QzBAbZBZJq9CmTyzKrh3rpMiWIr1OBn6RL4mMVWXI=
+cloud.google.com/go/oslogin v1.13.6/go.mod h1:7g1whx5UORkP8K8qGFhlc6njxFA35SX1V4dDNpWWku0=
+cloud.google.com/go/oslogin v1.13.7/go.mod h1:xq027cL0fojpcEcpEQdWayiDn8tIx3WEFYMM6+q7U+E=
+cloud.google.com/go/oslogin v1.13.8/go.mod h1:rc52yAdMXB5mERVeOXRcDnaswQNFTPRJ93VVHmGwJSk=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
+cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8=
+cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8=
+cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE=
+cloud.google.com/go/phishingprotection v0.8.5/go.mod h1:g1smd68F7mF1hgQPuYn3z8HDbNre8L6Z0b7XMYFmX7I=
+cloud.google.com/go/phishingprotection v0.8.6/go.mod h1:OSnaLSZryNaS80qVzArfi2/EoNWEeTSutTiWA/29xKU=
+cloud.google.com/go/phishingprotection v0.8.7/go.mod h1:FtYaOyGc/HQQU7wY4sfwYZBFDKAL+YtVBjUj8E3A3/I=
+cloud.google.com/go/phishingprotection v0.8.9/go.mod h1:xNojFKIdq+hNGNpOZOEGVGA4Mdhm2yByMli2Ni/RV0w=
+cloud.google.com/go/phishingprotection v0.8.10/go.mod h1:QJKnexvHGqL3u0qshpJBsjqCo+EEy3K/PrvogvcON8Q=
+cloud.google.com/go/phishingprotection v0.8.11/go.mod h1:Mge0cylqVFs+D0EyxlsTOJ1Guf3qDgrztHzxZqkhRQM=
+cloud.google.com/go/phishingprotection v0.8.12/go.mod h1:tkR+cZBpRdu4i04BP1CqaZr2yL7U1o8t+v/SZ2kOSDU=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0=
+cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU=
+cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64=
+cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U=
+cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs=
+cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0=
+cloud.google.com/go/policytroubleshooter v1.10.3/go.mod h1:+ZqG3agHT7WPb4EBIRqUv4OyIwRTZvsVDHZ8GlZaoxk=
+cloud.google.com/go/policytroubleshooter v1.10.4/go.mod h1:kSp7PKn80ttbKt8SSjQ0Z/pYYug/PFapxSx2Pr7xjf0=
+cloud.google.com/go/policytroubleshooter v1.10.5/go.mod h1:bpOf94YxjWUqsVKokzPBibMSAx937Jp2UNGVoMAtGYI=
+cloud.google.com/go/policytroubleshooter v1.10.7/go.mod h1:/JxxZOSCT8nASvH/SP4Bj81EnDFwZhFThG7mgVWIoPY=
+cloud.google.com/go/policytroubleshooter v1.10.8/go.mod h1:d+6phd7MABmER7PCqlHSWGE35NFDMJfu7cLjTr820UE=
+cloud.google.com/go/policytroubleshooter v1.10.9/go.mod h1:X8HEPVBWz8E+qwI/QXnhBLahEHdcuPO3M9YvSj0LDek=
+cloud.google.com/go/policytroubleshooter v1.10.10/go.mod h1:9S7SKOsLydGB2u91WKNjHpLScxxkKATIu3Co0fw8LPQ=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
+cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc=
+cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE=
+cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0=
+cloud.google.com/go/privatecatalog v0.9.5/go.mod h1:fVWeBOVe7uj2n3kWRGlUQqR/pOd450J9yZoOECcQqJk=
+cloud.google.com/go/privatecatalog v0.9.6/go.mod h1:BTwLqXfNzM6Tn4cTjzYj8avfw9+h/N68soYuTrYXL9I=
+cloud.google.com/go/privatecatalog v0.9.7/go.mod h1:NWLa8MCL6NkRSt8jhL8Goy2A/oHkvkeAxiA0gv0rIXI=
+cloud.google.com/go/privatecatalog v0.9.9/go.mod h1:attFfOEf8ECrCuCdT3WYY8wyMKRZt4iB1bEWYFzPn50=
+cloud.google.com/go/privatecatalog v0.9.10/go.mod h1:RxEAFdbH+8Ogu+1Lfp43KuAC6YIj46zWyoCX1dWB9nk=
+cloud.google.com/go/privatecatalog v0.9.11/go.mod h1:awEF2a8M6UgoqVJcF/MthkF8SSo6OoWQ7TtPNxUlljY=
+cloud.google.com/go/privatecatalog v0.9.12/go.mod h1:Sl292f/1xY0igI+CFNGfiXJWiN9BvaLpc8mjnCHNRnA=
+cloud.google.com/go/profiler v0.3.1/go.mod h1:GsG14VnmcMFQ9b+kq71wh3EKMZr3WRMgLzNiFRpW7tE=
+cloud.google.com/go/profiler v0.4.0/go.mod h1:RvPlm4dilIr3oJtAOeFQU9Lrt5RoySHSDj4pTd6TWeU=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c=
+cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE=
+cloud.google.com/go/pubsub v1.37.0/go.mod h1:YQOQr1uiUM092EXwKs56OPT650nwnawc+8/IjoUeGzQ=
+cloud.google.com/go/pubsub v1.38.0/go.mod h1:IPMJSWSus/cu57UyR01Jqa/bNOQA+XnPF6Z4dKW4fAA=
+cloud.google.com/go/pubsub v1.39.0/go.mod h1:FrEnrSGU6L0Kh3iBaAbIUM8KMR7LqyEkMboVxGXCT+s=
+cloud.google.com/go/pubsub v1.40.0/go.mod h1:BVJI4sI2FyXp36KFKvFwcfDRDfR8MiLT8mMhmIhdAeA=
+cloud.google.com/go/pubsub v1.41.0/go.mod h1:g+YzC6w/3N91tzG66e2BZtp7WrpBBMXVa3Y9zVoOGpk=
+cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
+cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
+cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0=
+cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw=
+cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w=
+cloud.google.com/go/recaptchaenterprise/v2 v2.8.4/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w=
+cloud.google.com/go/recaptchaenterprise/v2 v2.9.0/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w=
+cloud.google.com/go/recaptchaenterprise/v2 v2.9.2/go.mod h1:trwwGkfhCmp05Ll5MSJPXY7yvnO0p4v3orGANAFHAuU=
+cloud.google.com/go/recaptchaenterprise/v2 v2.11.0/go.mod h1:AgMf7QG7DBLcv8dstOz9+9dohjfh76IhBrkFxWdDL5Y=
+cloud.google.com/go/recaptchaenterprise/v2 v2.12.0/go.mod h1:4TohRUt9x4hzECD53xRFER+TJavgbep6riguPnsr4oQ=
+cloud.google.com/go/recaptchaenterprise/v2 v2.13.0/go.mod h1:jNYyn2ScR4DTg+VNhjhv/vJQdaU8qz+NpmpIzEE7HFQ=
+cloud.google.com/go/recaptchaenterprise/v2 v2.14.0/go.mod h1:pwC/eCyXq37YV3NSaiJsfOmuoTDkzURnVKAWGSkjDUY=
+cloud.google.com/go/recaptchaenterprise/v2 v2.14.1/go.mod h1:s1dcJEzWpEsgZN8aqHacC3mWUaQPd8q/QoibU/nkr18=
+cloud.google.com/go/recaptchaenterprise/v2 v2.14.2/go.mod h1:MwPgdgvBkE46aWuuXeBTCB8hQJ88p+CpXInROZYCTkc=
+cloud.google.com/go/recaptchaenterprise/v2 v2.14.3/go.mod h1:MiSHAXwja4btHPJFNJrDke//V+x83/ckXcdwbzn4+e8=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
+cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y=
+cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8=
+cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU=
+cloud.google.com/go/recommendationengine v0.8.5/go.mod h1:A38rIXHGFvoPvmy6pZLozr0g59NRNREz4cx7F58HAsQ=
+cloud.google.com/go/recommendationengine v0.8.6/go.mod h1:ratALtVdAkofp0vDzpkL87zJcTymiQLc7fQyohRKWoA=
+cloud.google.com/go/recommendationengine v0.8.7/go.mod h1:YsUIbweUcpm46OzpVEsV5/z+kjuV6GzMxl7OAKIGgKE=
+cloud.google.com/go/recommendationengine v0.8.9/go.mod h1:QgE5f6s20QhCXf4UR9KMI/Q6Spykd2zEYXX2oBz6Cbs=
+cloud.google.com/go/recommendationengine v0.8.10/go.mod h1:vlLaupkdqL3wuabhhjvrpH7TFswyxO6+P0L3AqrATPU=
+cloud.google.com/go/recommendationengine v0.8.11/go.mod h1:cEkU4tCXAF88a4boMFZym7U7uyxvVwcQtKzS85IbQio=
+cloud.google.com/go/recommendationengine v0.8.12/go.mod h1:A3c39mOVC4utWlwk+MpchvkZTM6MSJXm3KUwTQ47VzA=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
+cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII=
+cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18=
+cloud.google.com/go/recommender v1.11.2/go.mod h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y=
+cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4=
+cloud.google.com/go/recommender v1.12.0/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4=
+cloud.google.com/go/recommender v1.12.1/go.mod h1:gf95SInWNND5aPas3yjwl0I572dtudMhMIG4ni8nr+0=
+cloud.google.com/go/recommender v1.12.2/go.mod h1:9YizZzqpUtJelRv0pw2bfl3+3i5bTwL/FuAucj15WJc=
+cloud.google.com/go/recommender v1.12.3/go.mod h1:OgN0MjV7/6FZUUPgF2QPQtYErtZdZc4u+5onvurcGEI=
+cloud.google.com/go/recommender v1.12.5/go.mod h1:ggh5JNuG5ajpRqqcEkgni/DjpS7x12ktO+Edu8bmCJM=
+cloud.google.com/go/recommender v1.12.6/go.mod h1:BNNC/CEIGV3y6hQNjewrVx80PIidfFtf8D+6SCEgLnA=
+cloud.google.com/go/recommender v1.12.7/go.mod h1:lG8DVtczLltWuaCv4IVpNphONZTzaCC9KdxLYeZM5G4=
+cloud.google.com/go/recommender v1.12.8/go.mod h1:zoJL8kPJJotOoNU3D2fCXW33vhbyIPe0Sq7ObhYLnGM=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
+cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA=
+cloud.google.com/go/redis v1.13.3/go.mod h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U=
+cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs=
+cloud.google.com/go/redis v1.14.2/go.mod h1:g0Lu7RRRz46ENdFKQ2EcQZBAJ2PtJHJLuiiRuEXwyQw=
+cloud.google.com/go/redis v1.14.3/go.mod h1:YtYX9QC98d3LEI9GUixwZ339Niw6w5xFcxLRruuFuss=
+cloud.google.com/go/redis v1.15.0/go.mod h1:X9Fp3vG5kqr5ho+5YM6AgJxypn+I9Ea5ANCuFKXLdX0=
+cloud.google.com/go/redis v1.16.0/go.mod h1:NLzG3Ur8ykVIZk+i5ienRnycsvWzQ0uCLcil6Htc544=
+cloud.google.com/go/redis v1.16.2/go.mod h1:bn/4nXSZkoH4QTXRjqWR2AZ0WA1b13ct354nul2SSiU=
+cloud.google.com/go/redis v1.16.3/go.mod h1:zqagsFk9fZzFKJB5NzijOUi53BeU5jUiPa4Kz/8Qz+Q=
+cloud.google.com/go/redis v1.16.4/go.mod h1:unCVfLP5eFrVhGLDnb7IaSaWxuZ+7cBgwwBwbdG9m9w=
+cloud.google.com/go/redis v1.16.5/go.mod h1:cWn6WHSEnmVZh9lJ9AN/UwDTtvlcT+TTRGvNIckUbG0=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
+cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE=
+cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U=
+cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0=
+cloud.google.com/go/resourcemanager v1.9.5/go.mod h1:hep6KjelHA+ToEjOfO3garMKi/CLYwTqeAw7YiEI9x8=
+cloud.google.com/go/resourcemanager v1.9.6/go.mod h1:d+XUOGbxg6Aka3lmC4fDiserslux3d15uX08C6a0MBg=
+cloud.google.com/go/resourcemanager v1.9.7/go.mod h1:cQH6lJwESufxEu6KepsoNAsjrUtYYNXRwxm4QFE5g8A=
+cloud.google.com/go/resourcemanager v1.9.9/go.mod h1:vCBRKurJv+XVvRZ0XFhI/eBrBM7uBOPFjMEwSDMIflY=
+cloud.google.com/go/resourcemanager v1.9.10/go.mod h1:UJ5zGD2ZD+Ng3MNxkU1fwBbpJQEQE1UctqpvV5pbP1M=
+cloud.google.com/go/resourcemanager v1.9.11/go.mod h1:SbNAbjVLoi2rt9G74bEYb3aw1iwvyWPOJMnij4SsmHA=
+cloud.google.com/go/resourcemanager v1.9.12/go.mod h1:unouv9x3+I+6kVeE10LGM3oJ8aQrUZganWnRchitbAM=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
+cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk=
+cloud.google.com/go/resourcesettings v1.6.3/go.mod h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic=
+cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI=
+cloud.google.com/go/resourcesettings v1.6.5/go.mod h1:WBOIWZraXZOGAgoR4ukNj0o0HiSMO62H9RpFi9WjP9I=
+cloud.google.com/go/resourcesettings v1.6.6/go.mod h1:t1+N03/gwNuKyOqpnACg/hWNL7ujT8mQYGqOzxOjFVE=
+cloud.google.com/go/resourcesettings v1.6.7/go.mod h1:zwRL5ZoNszs1W6+eJYMk6ILzgfnTj13qfU4Wvfupuqk=
+cloud.google.com/go/resourcesettings v1.7.0/go.mod h1:pFzZYOQMyf1hco9pbNWGEms6N/2E7nwh0oVU1Tz+4qA=
+cloud.google.com/go/resourcesettings v1.7.2/go.mod h1:mNdB5Wl9/oVr9Da3OrEstSyXCT949ignvO6ZrmYdmGU=
+cloud.google.com/go/resourcesettings v1.7.3/go.mod h1:lMSnOoQPDKzcF6LGJOBcQqGCY2Zm8ZhbHEzhqdU61S8=
+cloud.google.com/go/resourcesettings v1.7.4/go.mod h1:seBdLuyeq+ol2u9G2+74GkSjQaxaBWF+vVb6mVzQFG0=
+cloud.google.com/go/resourcesettings v1.7.5/go.mod h1:voqqKzYIrnoAqFKV6xk2qhgTnxzfGCJNOuBnHJEzcNU=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
+cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8=
+cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo=
+cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg=
+cloud.google.com/go/retail v1.15.1/go.mod h1:In9nSBOYhLbDGa87QvWlnE1XA14xBN2FpQRiRsUs9wU=
+cloud.google.com/go/retail v1.16.0/go.mod h1:LW7tllVveZo4ReWt68VnldZFWJRzsh9np+01J9dYWzE=
+cloud.google.com/go/retail v1.16.1/go.mod h1:xzHOcNrzFB5aew1AjWhZAPnHF2oCGqt7hMmTlrzQqAs=
+cloud.google.com/go/retail v1.16.2/go.mod h1:T7UcBh4/eoxRBpP3vwZCoa+PYA9/qWRTmOCsV8DRdZ0=
+cloud.google.com/go/retail v1.17.0/go.mod h1:GZ7+J084vyvCxO1sjdBft0DPZTCA/lMJ46JKWxWeb6w=
+cloud.google.com/go/retail v1.17.2/go.mod h1:Ad6D8tkDZatI1X7szhhYWiatZmH6nSUfZ3WeCECyA0E=
+cloud.google.com/go/retail v1.17.3/go.mod h1:8OWmRAUXg8PKs1ef+VwrBLYBRdYJxq+YyxiytMaUBRI=
+cloud.google.com/go/retail v1.17.4/go.mod h1:oPkL1FzW7D+v/hX5alYIx52ro2FY/WPAviwR1kZZTMs=
+cloud.google.com/go/retail v1.17.5/go.mod h1:DSWPessLdnuvRH+N2FY+j1twyKtpRDKp4Y88dm7VqBw=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo=
+cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU=
+cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s=
+cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE=
+cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4=
+cloud.google.com/go/run v1.3.4/go.mod h1:FGieuZvQ3tj1e9GnzXqrMABSuir38AJg5xhiYq+SF3o=
+cloud.google.com/go/run v1.3.5/go.mod h1:AuT4W7a7gq7TUa0GrX9F6w/uiUoWHIV767yhqXqcbeg=
+cloud.google.com/go/run v1.3.6/go.mod h1:/ou4d0u5CcK5/44Hbpd3wsBjNFXmn6YAWChu+XAKwSU=
+cloud.google.com/go/run v1.3.7/go.mod h1:iEUflDx4Js+wK0NzF5o7hE9Dj7QqJKnRj0/b6rhVq20=
+cloud.google.com/go/run v1.3.9/go.mod h1:Ep/xsiUt5ZOwNptGl1FBlHb+asAgqB+9RDJKBa/c1mI=
+cloud.google.com/go/run v1.3.10/go.mod h1:zQGa7V57WWZhyiUYMlYitrBZzR+d2drzJQvrpaQ8YIA=
+cloud.google.com/go/run v1.4.0/go.mod h1:4G9iHLjdOC+CQ0CzA0+6nLeR6NezVPmlj+GULmb0zE4=
+cloud.google.com/go/run v1.4.1/go.mod h1:gaXIpytRDfrJjb3pz9PRG2q2KUaDDDV+Uvmq6QRZH20=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
+cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY=
+cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc=
+cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI=
+cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI=
+cloud.google.com/go/scheduler v1.10.6/go.mod h1:pe2pNCtJ+R01E06XCDOJs1XvAMbv28ZsQEbqknxGOuE=
+cloud.google.com/go/scheduler v1.10.7/go.mod h1:AfKUtlPF0D2xtfWy+k6rQFaltcBeeoSOY7XKQkWs+1s=
+cloud.google.com/go/scheduler v1.10.8/go.mod h1:0YXHjROF1f5qTMvGTm4o7GH1PGAcmu/H/7J7cHOiHl0=
+cloud.google.com/go/scheduler v1.10.10/go.mod h1:nOLkchaee8EY0g73hpv613pfnrZwn/dU2URYjJbRLR0=
+cloud.google.com/go/scheduler v1.10.11/go.mod h1:irpDaNL41B5q8hX/Ki87hzkxO8FnZEhhZnFk6OP8TnE=
+cloud.google.com/go/scheduler v1.10.12/go.mod h1:6DRtOddMWJ001HJ6MS148rtLSh/S2oqd2hQC3n5n9fQ=
+cloud.google.com/go/scheduler v1.10.13/go.mod h1:lDJItkp2hNrCsHOBtVExCzjXBzK9WI3yKNg713/OU4s=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
+cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss=
+cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI=
+cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w=
+cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4=
+cloud.google.com/go/secretmanager v1.12.0/go.mod h1:Y1Gne3Ag+fZ2TDTiJc8ZJCMFbi7k1rYT4Rw30GXfvlk=
+cloud.google.com/go/secretmanager v1.13.1/go.mod h1:y9Ioh7EHp1aqEKGYXk3BOC+vkhlHm9ujL7bURT4oI/4=
+cloud.google.com/go/secretmanager v1.13.3/go.mod h1:e45+CxK0w6GaL4hS+KabgQskl4RdSS30b+HRf0TH0kk=
+cloud.google.com/go/secretmanager v1.13.4/go.mod h1:SjKHs6rx0ELUqfbRWrWq4e7SiNKV7QMWZtvZsQm3k5w=
+cloud.google.com/go/secretmanager v1.13.5/go.mod h1:/OeZ88l5Z6nBVilV0SXgv6XJ243KP2aIhSWRMrbvDCQ=
+cloud.google.com/go/secretmanager v1.13.6/go.mod h1:x2ySyOrqv3WGFRFn2Xk10iHmNmvmcEVSSqc30eb1bhw=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
+cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg=
+cloud.google.com/go/security v1.15.3/go.mod h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs=
+cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4=
+cloud.google.com/go/security v1.15.5/go.mod h1:KS6X2eG3ynWjqcIX976fuToN5juVkF6Ra6c7MPnldtc=
+cloud.google.com/go/security v1.15.6/go.mod h1:UMEAGVBMqE6xZvkCR1FvUIeBEmGOCRIDwtwT357xmok=
+cloud.google.com/go/security v1.17.0/go.mod h1:eSuFs0SlBv1gWg7gHIoF0hYOvcSwJCek/GFXtgO6aA0=
+cloud.google.com/go/security v1.17.2/go.mod h1:6eqX/AgDw56KwguEBfFNiNQ+Vzi+V6+GopklexYuJ0U=
+cloud.google.com/go/security v1.17.3/go.mod h1:CuKzQq5OD6TXAYaZs/jI0d7CNHoD0LXbpsznIIIn4f4=
+cloud.google.com/go/security v1.17.4/go.mod h1:KMuDJH+sEB3KTODd/tLJ7kZK+u2PQt+Cfu0oAxzIhgo=
+cloud.google.com/go/security v1.17.5/go.mod h1:MA8w7SbQAQO9CQ9r0R7HR0F7g1AJoqx87SFLpapq3OU=
+cloud.google.com/go/security v1.18.0/go.mod h1:oS/kRVUNmkwEqzCgSmK2EaGd8SbDUvliEiADjSb/8Mo=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
+cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s=
+cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI=
+cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM=
+cloud.google.com/go/securitycenter v1.24.3/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM=
+cloud.google.com/go/securitycenter v1.24.4/go.mod h1:PSccin+o1EMYKcFQzz9HMMnZ2r9+7jbc+LvPjXhpwcU=
+cloud.google.com/go/securitycenter v1.27.0/go.mod h1:J8BkC1KQ59pzVwFZ9OiqcKL57JwPNEv8SMxfVztjqD0=
+cloud.google.com/go/securitycenter v1.28.0/go.mod h1:kmS8vAIwPbCIg7dDuiVKF/OTizYfuWe5f0IIW6NihN8=
+cloud.google.com/go/securitycenter v1.30.0/go.mod h1:/tmosjS/dfTnzJxOzZhTXdX3MXWsCmPWfcYOgkJmaJk=
+cloud.google.com/go/securitycenter v1.32.0/go.mod h1:s1dN6hM6HZyzUyJrqBoGvhxR/GecT5u48sidMIgDxTo=
+cloud.google.com/go/securitycenter v1.33.0/go.mod h1:lkEPItFjC1RRBHniiWR3lJTpUJW+7+EFAb7nP5ZCQxI=
+cloud.google.com/go/securitycenter v1.33.1/go.mod h1:jeFisdYUWHr+ig72T4g0dnNCFhRwgwGoQV6GFuEwafw=
+cloud.google.com/go/securitycenter v1.34.0/go.mod h1:7esjYVxn7k0nm02CnLNueFWD40FH0eunhookSEUalSs=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI=
+cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg=
+cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw=
+cloud.google.com/go/servicedirectory v1.11.4/go.mod h1:Bz2T9t+/Ehg6x+Y7Ycq5xiShYLD96NfEsWNHyitj1qM=
+cloud.google.com/go/servicedirectory v1.11.5/go.mod h1:hp2Ix2Qko7hIh5jaFWftbdwKXHQhYPijcGPpLgTVZvw=
+cloud.google.com/go/servicedirectory v1.11.7/go.mod h1:fiO/tM0jBpVhpCAe7Yp5HmEsmxSUcOoc4vPrO02v68I=
+cloud.google.com/go/servicedirectory v1.11.9/go.mod h1:qiDNuIS2qxuuroSmPNuXWxoFMvsEudKXP62Wos24BsU=
+cloud.google.com/go/servicedirectory v1.11.10/go.mod h1:pgbBjH2r73lEd3Y7eNA64fRO3g1zL96PMu+/hAjkH6g=
+cloud.google.com/go/servicedirectory v1.11.11/go.mod h1:pnynaftaj9LmRLIc6t3r7r7rdCZZKKxui/HaF/RqYfs=
+cloud.google.com/go/servicedirectory v1.11.12/go.mod h1:A0mXC1awKEK5alkG7p3hxaHtb5SSPqAdeWx09RTIOGY=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
+cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc=
+cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc=
+cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM=
+cloud.google.com/go/shell v1.7.5/go.mod h1:hL2++7F47/IfpfTO53KYf1EC+F56k3ThfNEXd4zcuiE=
+cloud.google.com/go/shell v1.7.6/go.mod h1:Ax+fG/h5TbwbnlhyzkgMeDK7KPfINYWE0V/tZUuuPXo=
+cloud.google.com/go/shell v1.7.7/go.mod h1:7OYaMm3TFMSZBh8+QYw6Qef+fdklp7CjjpxYAoJpZbQ=
+cloud.google.com/go/shell v1.7.9/go.mod h1:h3wVC6qaQ1nIlSWMasl1e/uwmepVbZpjSk/Bn7ZafSc=
+cloud.google.com/go/shell v1.7.10/go.mod h1:1sKAD5ijarrTLPX0VMQai6jCduRxaU2A6w0JWVGCNag=
+cloud.google.com/go/shell v1.7.11/go.mod h1:SywZHWac7onifaT9m9MmegYp3GgCLm+tgk+w2lXK8vg=
+cloud.google.com/go/shell v1.7.12/go.mod h1:QxxwQMvXqDUTYgMwbO7Y2Z6rojGzA7q64aQTCEj7xfM=
+cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
+cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs=
+cloud.google.com/go/spanner v1.18.0/go.mod h1:LvAjUXPeJRGNuGpikMULjhLj/t9cRvdc+fxRoLiugXA=
+cloud.google.com/go/spanner v1.25.0/go.mod h1:kQUft3x355hzzaeFbObjsvkzZDgpDkesp3v75WBnI8w=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.45.1/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
+cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM=
+cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM=
+cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0=
+cloud.google.com/go/spanner v1.53.0/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws=
+cloud.google.com/go/spanner v1.53.1/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws=
+cloud.google.com/go/spanner v1.54.0/go.mod h1:wZvSQVBgngF0Gq86fKup6KIYmN2be7uOKjtK97X+bQU=
+cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0=
+cloud.google.com/go/spanner v1.56.0/go.mod h1:DndqtUKQAt3VLuV2Le+9Y3WTnq5cNKrnLb/Piqcj+h0=
+cloud.google.com/go/spanner v1.57.0/go.mod h1:aXQ5QDdhPRIqVhYmnkAdwPYvj/DRN0FguclhEWw+jOo=
+cloud.google.com/go/spanner v1.58.0/go.mod h1:eSpP+aaT38sdwFs0ovo7Z1VsG9RyM9n7EHpM5DhDEsE=
+cloud.google.com/go/spanner v1.60.0/go.mod h1:D2bOAeT/dC6zsZhXRIxbdYa5nQEYU3wYM/1KN3eg7Fs=
+cloud.google.com/go/spanner v1.63.0/go.mod h1:iqDx7urZpgD7RekZ+CFvBRH6kVTW1ZSEb2HMDKOp5Cc=
+cloud.google.com/go/spanner v1.64.0/go.mod h1:TOFx3pb2UwPsDGlE1gTehW+y6YlU4IFk+VdDHSGQS/M=
+cloud.google.com/go/spanner v1.65.0/go.mod h1:dQGB+w5a67gtyE3qSKPPxzniedrnAmV6tewQeBY7Hxs=
+cloud.google.com/go/spanner v1.67.0/go.mod h1:Um+TNmxfcCHqNCKid4rmAMvoe/Iu1vdz6UfxJ9GPxRQ=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA=
+cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI=
+cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY=
+cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY=
+cloud.google.com/go/speech v1.21.1/go.mod h1:E5GHZXYQlkqWQwY5xRSLHw2ci5NMQNG52FfMU1aZrIA=
+cloud.google.com/go/speech v1.22.0/go.mod h1:d7pmrSKyrD12c7dRrjqgA/U0eeyZs0i4VpvOlpJXEBA=
+cloud.google.com/go/speech v1.22.1/go.mod h1:s8C9OLTemdGb4FHX3imHIp5AanwKR4IhdSno0Cg1s7k=
+cloud.google.com/go/speech v1.23.1/go.mod h1:UNgzNxhNBuo/OxpF1rMhA/U2rdai7ILL6PBXFs70wq0=
+cloud.google.com/go/speech v1.23.3/go.mod h1:u7tK/jxhzRZwZ5Nujhau7iLI3+VfJKYhpoZTjU7hRsE=
+cloud.google.com/go/speech v1.23.4/go.mod h1:pv5VPKuXsZStCnTBImQP8HDfQHgG4DxJSlDyx5Kcwak=
+cloud.google.com/go/speech v1.24.0/go.mod h1:HcVyIh5jRXM5zDMcbFCW+DF2uK/MSGN6Rastt6bj1ic=
+cloud.google.com/go/speech v1.24.1/go.mod h1:th/IKNidPLzrbaEiKLIhTv/oTGADe4r4bzxZvYG62EE=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8=
+cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k=
+cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY=
+cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o=
+cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
+cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80=
+cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ=
+cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
+cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs=
+cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA=
+cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc=
+cloud.google.com/go/storagetransfer v1.10.4/go.mod h1:vef30rZKu5HSEf/x1tK3WfWrL0XVoUQN/EPDRGPzjZs=
+cloud.google.com/go/storagetransfer v1.10.5/go.mod h1:086WXPZlWXLfql+/nlmcc8ZzFWvITqfSGUQyMdf5eBk=
+cloud.google.com/go/storagetransfer v1.10.6/go.mod h1:3sAgY1bx1TpIzfSzdvNGHrGYldeCTyGI/Rzk6Lc6A7w=
+cloud.google.com/go/storagetransfer v1.10.8/go.mod h1:fEGWYffkV9OYOKms8nxyJWIZA7iEWPl2Mybk6bpQnEk=
+cloud.google.com/go/storagetransfer v1.10.9/go.mod h1:QKkg5Wau5jc0iXlPOZyEv3hH9mjCLeYIBiRrZTf6Ehw=
+cloud.google.com/go/storagetransfer v1.10.10/go.mod h1:8+nX+WgQ2ZJJnK8e+RbK/zCXk8T7HdwyQAJeY7cEcm0=
+cloud.google.com/go/storagetransfer v1.10.11/go.mod h1:AMAR/PTS5yKPp1FHP6rk3eJYGmHF14vQYiHddcIgoOA=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
+cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo=
+cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY=
+cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI=
+cloud.google.com/go/talent v1.6.6/go.mod h1:y/WQDKrhVz12WagoarpAIyKKMeKGKHWPoReZ0g8tseQ=
+cloud.google.com/go/talent v1.6.7/go.mod h1:OLojlmmygm0wuTqi+UXKO0ZdLHsAedUfDgxDrkIWxTo=
+cloud.google.com/go/talent v1.6.8/go.mod h1:kqPAJvhxmhoUTuqxjjk2KqA8zUEeTDmH+qKztVubGlQ=
+cloud.google.com/go/talent v1.6.10/go.mod h1:q2/qIb2Eb2svmeBfkCGIia/NGmkcScdyYSyNNOgFRLI=
+cloud.google.com/go/talent v1.6.11/go.mod h1:tmMptbP5zTw6tjudgip8LObeh7E4xHNC/IYsiGtxnrc=
+cloud.google.com/go/talent v1.6.12/go.mod h1:nT9kNVuJhZX2QgqKZS6t6eCWZs5XEBYRBv6bIMnPmo4=
+cloud.google.com/go/talent v1.6.13/go.mod h1:jqjQzIF7ZPCxFSdsfhgUF0wGB+mbytYzyUqaHLiQcQg=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
+cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4=
+cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so=
+cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74=
+cloud.google.com/go/texttospeech v1.7.5/go.mod h1:tzpCuNWPwrNJnEa4Pu5taALuZL4QRRLcb+K9pbhXT6M=
+cloud.google.com/go/texttospeech v1.7.6/go.mod h1:nhRJledkoE6/6VvEq/d0CX7nPnDwc/uzfaqePlmiPVE=
+cloud.google.com/go/texttospeech v1.7.7/go.mod h1:XO4Wr2VzWHjzQpMe3gS58Oj68nmtXMyuuH+4t0wy9eA=
+cloud.google.com/go/texttospeech v1.7.9/go.mod h1:nuo7l7CVWUMvaTgswbn/hhn2Tv73/WbenqGyc236xpo=
+cloud.google.com/go/texttospeech v1.7.10/go.mod h1:ChThPazSxR7e4qe9ryRlFGU4lRONvL9Oo2geyp7LX4o=
+cloud.google.com/go/texttospeech v1.7.11/go.mod h1:Ua125HU+WT2IkIo5MzQtuNpNEk72soShJQVdorZ1SAE=
+cloud.google.com/go/texttospeech v1.7.12/go.mod h1:B1Xck47Mhy/PJMnvrLkv0gfKGinGP78c0XFZjWB7TdY=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
+cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU=
+cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY=
+cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y=
+cloud.google.com/go/tpu v1.6.5/go.mod h1:P9DFOEBIBhuEcZhXi+wPoVy/cji+0ICFi4TtTkMHSSs=
+cloud.google.com/go/tpu v1.6.6/go.mod h1:T4gCNpT7SO28mMkCVJTWQ3OXAUY3YlScOqU4+5iX2B8=
+cloud.google.com/go/tpu v1.6.7/go.mod h1:o8qxg7/Jgt7TCgZc3jNkd4kTsDwuYD3c4JTMqXZ36hU=
+cloud.google.com/go/tpu v1.6.9/go.mod h1:6C7Ed7Le5Y1vWGR+8lQWsh/gmqK6l53lgji0YXBU40o=
+cloud.google.com/go/tpu v1.6.10/go.mod h1:O+N+S0i3bOH6NJ+s9GPsg9LC7jnE1HRSp8CSRYjCrfM=
+cloud.google.com/go/tpu v1.6.11/go.mod h1:W0C4xaSj1Ay3VX/H96FRvLt2HDs0CgdRPVI4e7PoCDk=
+cloud.google.com/go/tpu v1.6.12/go.mod h1:IFJa2vI7gxF6fypOQXYmbuFwKLsde4zVwcv1p9zhOqY=
+cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g=
+cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
+cloud.google.com/go/trace v1.10.2/go.mod h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA=
+cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY=
+cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY=
+cloud.google.com/go/trace v1.10.5/go.mod h1:9hjCV1nGBCtXbAE4YK7OqJ8pmPYSxPA0I67JwRd5s3M=
+cloud.google.com/go/trace v1.10.6/go.mod h1:EABXagUjxGuKcZMy4pXyz0fJpE5Ghog3jzTxcEsVJS4=
+cloud.google.com/go/trace v1.10.7/go.mod h1:qk3eiKmZX0ar2dzIJN/3QhY2PIFh1eqcIdaN5uEjQPM=
+cloud.google.com/go/trace v1.10.9/go.mod h1:vtWRnvEh+d8h2xljwxVwsdxxpoWZkxcNYnJF3FuJUV8=
+cloud.google.com/go/trace v1.10.10/go.mod h1:5b1BiSYQO27KgGRevNFfoIQ8czwpVgnkKbTLb4wV+XM=
+cloud.google.com/go/trace v1.10.11/go.mod h1:fUr5L3wSXerNfT0f1bBg08W4axS2VbHGgYcfH4KuTXU=
+cloud.google.com/go/trace v1.10.12/go.mod h1:tYkAIta/gxgbBZ/PIzFxSH5blajgX4D00RpQqCG/GZs=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8=
+cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY=
+cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0=
+cloud.google.com/go/translate v1.10.0/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0=
+cloud.google.com/go/translate v1.10.1/go.mod h1:adGZcQNom/3ogU65N9UXHOnnSvjPwA/jKQUMnsYXOyk=
+cloud.google.com/go/translate v1.10.2/go.mod h1:M4xIFGUwTrmuhyMMpJFZrBuSOhaX7Fhj4U1//mfv4BE=
+cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs=
+cloud.google.com/go/translate v1.10.5/go.mod h1:n9fFca4U/EKr2GzJKrnQXemlYhfo1mT1nSt7Rt4l/VA=
+cloud.google.com/go/translate v1.10.6/go.mod h1:vqZOHurggOqpssx/agK9S21UdStpwugMOhlHvWEGAdw=
+cloud.google.com/go/translate v1.10.7/go.mod h1:mH/+8tvcItuy1cOWqU+/Y3iFHgkVUObNIQYI/kiFFiY=
+cloud.google.com/go/translate v1.11.0/go.mod h1:UFNHzrfcEo/ZCmA5SveVqxh0l57BP27HCvroN5o59FI=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM=
+cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU=
+cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA=
+cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU=
+cloud.google.com/go/video v1.20.4/go.mod h1:LyUVjyW+Bwj7dh3UJnUGZfyqjEto9DnrvTe1f/+QrW0=
+cloud.google.com/go/video v1.20.5/go.mod h1:tCaG+vfAM6jmkwHvz2M0WU3KhiXpmDbQy3tBryMo8I0=
+cloud.google.com/go/video v1.20.6/go.mod h1:d5AOlIfWXpDg15wvztHmjFvKTTImWJU7EnMVWkoiEAk=
+cloud.google.com/go/video v1.21.0/go.mod h1:Kqh97xHXZ/bIClgDHf5zkKvU3cvYnLyRefmC8yCBqKI=
+cloud.google.com/go/video v1.21.2/go.mod h1:UNXGQj3Hdyb70uaF9JeeM8Y8BAmAzLEMSWmyBKY2iVM=
+cloud.google.com/go/video v1.21.3/go.mod h1:tp2KqkcxNEL5k2iF2Hd38aIWlNo/ew+i1yklhlyq6BM=
+cloud.google.com/go/video v1.22.0/go.mod h1:CxPshUNAb1ucnzbtruEHlAal9XY+SPG2cFqC/woJzII=
+cloud.google.com/go/video v1.22.1/go.mod h1:+AYF4e9kqQhra0AfKPoOOIUK0Ho7BquOWQK+Te+Qnns=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
+cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc=
+cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo=
+cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8=
+cloud.google.com/go/videointelligence v1.11.5/go.mod h1:/PkeQjpRponmOerPeJxNPuxvi12HlW7Em0lJO14FC3I=
+cloud.google.com/go/videointelligence v1.11.6/go.mod h1:b6dd26k4jUM+9evzWxLK1QDwVvoOA1piEYiTDv3jF6w=
+cloud.google.com/go/videointelligence v1.11.7/go.mod h1:iMCXbfjurmBVgKuyLedTzv90kcnppOJ6ttb0+rLDID0=
+cloud.google.com/go/videointelligence v1.11.9/go.mod h1:Mv0dgb6U12BfBRPj39nM/7gcAFS1+VVGpTiyMJ/ShPo=
+cloud.google.com/go/videointelligence v1.11.10/go.mod h1:5oW8qq+bk8Me+3fNoQK+27CCw4Nsuk/YN7zMw7vNDTA=
+cloud.google.com/go/videointelligence v1.11.11/go.mod h1:dab2Ca3AXT6vNJmt3/6ieuquYRckpsActDekLcsd6dU=
+cloud.google.com/go/videointelligence v1.11.12/go.mod h1:dQlDAFtTwsZi3UI+03NVF4XQoarx0VU5/IKMLyVyC2E=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
+cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM=
+cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw=
+cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM=
+cloud.google.com/go/vision/v2 v2.7.6/go.mod h1:ZkvWTVNPBU3YZYzgF9Y1jwEbD1NBOCyJn0KFdQfE6Bw=
+cloud.google.com/go/vision/v2 v2.8.0/go.mod h1:ocqDiA2j97pvgogdyhoxiQp2ZkDCyr0HWpicywGGRhU=
+cloud.google.com/go/vision/v2 v2.8.1/go.mod h1:0n3GzR+ZyRVDHTH5koELHFqIw3lXaFdLzlHUvlXNWig=
+cloud.google.com/go/vision/v2 v2.8.2/go.mod h1:BHZA1LC7dcHjSr9U9OVhxMtLKd5l2jKPzLRALEJvuaw=
+cloud.google.com/go/vision/v2 v2.8.4/go.mod h1:qlmeVbmCfPNuD1Kwa7/evqCJYoJ7WhiZ2XeVSYwiOaA=
+cloud.google.com/go/vision/v2 v2.8.5/go.mod h1:3X2ni4uSzzqpj8zTUD6aia62O1NisD19JH3l5i0CoM4=
+cloud.google.com/go/vision/v2 v2.8.6/go.mod h1:G3v0uovxCye3u369JfrHGY43H6u/IQ08x9dw5aVH8yY=
+cloud.google.com/go/vision/v2 v2.8.7/go.mod h1:4ADQGbgAAvEDn/2I6XLeBN6mCUq6D44bfjWaqQc6iYU=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
+cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8=
+cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo=
+cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70=
+cloud.google.com/go/vmmigration v1.7.5/go.mod h1:pkvO6huVnVWzkFioxSghZxIGcsstDvYiVCxQ9ZH3eYI=
+cloud.google.com/go/vmmigration v1.7.6/go.mod h1:HpLc+cOfjHgW0u6jdwcGlOSbkeemIEwGiWKS+8Mqy1M=
+cloud.google.com/go/vmmigration v1.7.7/go.mod h1:qYIK5caZY3IDMXQK+A09dy81QU8qBW0/JDTc39OaKRw=
+cloud.google.com/go/vmmigration v1.7.9/go.mod h1:x5LQyAESUXsI7/QAQY6BV8xEjIrlkGI+S+oau/Sb0Gs=
+cloud.google.com/go/vmmigration v1.7.10/go.mod h1:VkoA4ktmA0C3fr7LqhthGtGWEmgM7WHWg6ObxeXR5lU=
+cloud.google.com/go/vmmigration v1.7.11/go.mod h1:PmD1fDB0TEHGQR1tDZt9GEXFB9mnKKalLcTVRJKzcQA=
+cloud.google.com/go/vmmigration v1.7.12/go.mod h1:Fb6yZsMdgFUo3wdDc7vK75KmBzXkY1Tio/053vuvCXU=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk=
+cloud.google.com/go/vmwareengine v1.0.2/go.mod h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA=
+cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4=
+cloud.google.com/go/vmwareengine v1.1.1/go.mod h1:nMpdsIVkUrSaX8UvmnBhzVzG7PPvNYc5BszcvIVudYs=
+cloud.google.com/go/vmwareengine v1.1.2/go.mod h1:7wZHC+0NM4TnQE8gUpW397KgwccH+fAnc4Lt5zB0T1k=
+cloud.google.com/go/vmwareengine v1.1.3/go.mod h1:UoyF6LTdrIJRvDN8uUB8d0yimP5A5Ehkr1SRzL1APZw=
+cloud.google.com/go/vmwareengine v1.1.5/go.mod h1:Js6QbSeC1OgpyygalCrMj90wa93O3kFgcs/u1YzCKsU=
+cloud.google.com/go/vmwareengine v1.1.6/go.mod h1:9txHCR2yJ6H9pFsfehTXLte5uvl/wOiM2PCtcVfglvI=
+cloud.google.com/go/vmwareengine v1.2.0/go.mod h1:rPjCHu6hG9N8d6PhkoDWFkqL9xpbFY+ueVW+0pNFbZg=
+cloud.google.com/go/vmwareengine v1.2.1/go.mod h1:OE5z8qJdTiPpSeWunFenN/RMF7ymRgI0HvJ/c7Zl5U0=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
+cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU=
+cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc=
+cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk=
+cloud.google.com/go/vpcaccess v1.7.5/go.mod h1:slc5ZRvvjP78c2dnL7m4l4R9GwL3wDLcpIWz6P/ziig=
+cloud.google.com/go/vpcaccess v1.7.6/go.mod h1:BV6tTobbojd2AhrEOBLfywFUJlFU63or5Qgd0XrFsCc=
+cloud.google.com/go/vpcaccess v1.7.7/go.mod h1:EzfSlgkoAnFWEMznZW0dVNvdjFjEW97vFlKk4VNBhwY=
+cloud.google.com/go/vpcaccess v1.7.9/go.mod h1:Y0BlcnG9yTkoM6IL6auBeKvVEXL4LmNIxzscekrn/uk=
+cloud.google.com/go/vpcaccess v1.7.10/go.mod h1:69kdbMh8wvGcM3agEHP1YnHPyxIBSRcZuK+KWZlpVLI=
+cloud.google.com/go/vpcaccess v1.7.11/go.mod h1:a2cuAiSCI4TVK0Dt6/dRjf22qQvfY+podxst2VvAkcI=
+cloud.google.com/go/vpcaccess v1.7.12/go.mod h1:Bt9j9aqlNDj1xW5uMNrHyhpc61JZgttbQRecG9xm1cE=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
+cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc=
+cloud.google.com/go/webrisk v1.9.3/go.mod h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8=
+cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0=
+cloud.google.com/go/webrisk v1.9.5/go.mod h1:aako0Fzep1Q714cPEM5E+mtYX8/jsfegAuS8aivxy3U=
+cloud.google.com/go/webrisk v1.9.6/go.mod h1:YzrDCXBOpnC64+GRRpSXPMQSvR8I4r5YO78y7A/T0Ac=
+cloud.google.com/go/webrisk v1.9.7/go.mod h1:7FkQtqcKLeNwXCdhthdXHIQNcFWPF/OubrlyRcLHNuQ=
+cloud.google.com/go/webrisk v1.9.9/go.mod h1:Wre67XdNQbt0LCBrvwVNBS5ORb8ssixq/u04CCZoO+k=
+cloud.google.com/go/webrisk v1.9.10/go.mod h1:wDxtALjJMXlGR2c3qtZaVI5jRKcneIMTYqV1IA1jPmo=
+cloud.google.com/go/webrisk v1.9.11/go.mod h1:mK6M8KEO0ZI7VkrjCq3Tjzw4vYq+3c4DzlMUDVaiswE=
+cloud.google.com/go/webrisk v1.9.12/go.mod h1:YaAgE2xKzIN8yQNUspTTeZbvdcifSJh+wcMyXmp8fgg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
+cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas=
+cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw=
+cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o=
+cloud.google.com/go/websecurityscanner v1.6.5/go.mod h1:QR+DWaxAz2pWooylsBF854/Ijvuoa3FCyS1zBa1rAVQ=
+cloud.google.com/go/websecurityscanner v1.6.6/go.mod h1:zjsc4h9nV1sUxuSMurR2v3gJwWKYorJ+Nanm+1/w6G0=
+cloud.google.com/go/websecurityscanner v1.6.7/go.mod h1:EpiW84G5KXxsjtFKK7fSMQNt8JcuLA8tQp7j0cyV458=
+cloud.google.com/go/websecurityscanner v1.6.9/go.mod h1:xrMxPiHB5iFxvc2tqbfUr6inPox6q6y7Wg0LTyZOKTw=
+cloud.google.com/go/websecurityscanner v1.6.10/go.mod h1:ndil05bWkG/KDgWAXwFFAuvOYcOKu+mk/wC/nIfLQwE=
+cloud.google.com/go/websecurityscanner v1.6.11/go.mod h1:vhAZjksELSg58EZfUQ1BMExD+hxqpn0G0DuyCZQjiTg=
+cloud.google.com/go/websecurityscanner v1.6.12/go.mod h1:9WFCBNpS0EIIhQaqiNC3ezZ48qisGPh3Ekz6T2n9Ioc=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
+cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM=
+cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM=
+cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc=
+cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
+cloud.google.com/go/workflows v1.12.4/go.mod h1:yQ7HUqOkdJK4duVtMeBCAOPiN1ZF1E9pAMX51vpwB/w=
+cloud.google.com/go/workflows v1.12.5/go.mod h1:KbK5/Ef28G8MKLXcsvt/laH1Vka4CKeQj0I1/wEiByo=
+cloud.google.com/go/workflows v1.12.6/go.mod h1:oDbEHKa4otYg4abwdw2Z094jB0TLLiFGAPA78EDAKag=
+cloud.google.com/go/workflows v1.12.8/go.mod h1:b7akG38W6lHmyPc+WYJxIYl1rEv79bBMYVwEZmp3aJQ=
+cloud.google.com/go/workflows v1.12.9/go.mod h1:g9S8NdA20MnQTReKVrXCDsnPrOsNgwonY7xZn+vr3SY=
+cloud.google.com/go/workflows v1.12.10/go.mod h1:RcKqCiOmKs8wFUEf3EwWZPH5eHc7Oq0kamIyOUCk0IE=
+cloud.google.com/go/workflows v1.12.11/go.mod h1:0cYsbMDyqr/1SbEt1DfN+S+mI2AAnVrT7+Hrh7qaxZ0=
+code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
+code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo=
+code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
+contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
+contrib.go.opencensus.io/exporter/aws v0.0.0-20230502192102-15967c811cec/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
+contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.14/go.mod h1:5pSSGY0Bhuk7waTHuDf4aQ8D2DrhgETRo9fy6k3Xlzc=
+contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
+contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
+contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~nelsam/hel v0.4.2/go.mod h1:6M8gpa/CGGNtFNlLN9odbQ0MzUQ2ZA8bWemVS8kTf5o=
+git.sr.ht/~nelsam/hel v0.4.3/go.mod h1:SP6fXSPxdmd7aOGMTSf8/EMLLCT438LHnZHN66nZqo0=
+git.sr.ht/~nelsam/hel/v4 v4.1.0/go.mod h1:OrTLOFJGT6G/t9q8WZy+NPOvmYUobdFZP59lpf+TrOg=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20221206110420-d395f97c4830/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
+github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
+github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas=
+github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v63.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
+github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.6.1/go.mod h1:xNjFERdhyMqZncbNJSPBsTCddk5kwsUVUzELQPMj/LA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0/go.mod h1:ceIuwmxDWptoW3eCqSXlnPsZFKh4X+R38dWPv7GS9Vs=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0/go.mod h1:Q28U+75mpCaSCDowNEmhIo/rmgdkqmkmzI7N6TGR4UY=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/testdata/perf v0.0.0-20240208231215-981108a6de20/go.mod h1:KMKhmwqL1TqoNRkQG2KGmDaVwT5Dte9d3PoADB38/UY=
+github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
+github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
+github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
+github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.25/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.1.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
+github.com/GoogleCloudPlatform/cloudsql-proxy v1.34.0/go.mod h1:XNDFTVaBS0jJYam3A88dpdzImNh0RRhBF4k05CNEENs=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
+github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/KimMachineGun/automemlimit v0.5.0/go.mod h1:di3GCKiu9Y+1fs92erCbUvKzPkNyViN3mA0vti/ykEQ=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Microsoft/cosesign1go v1.2.0/go.mod h1:1La/HcGw19rRLhPW0S6u55K6LKfti+GQSgGCtrfhVe8=
+github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7rLF7yLglYbk=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -49,8 +2270,12 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -58,30 +2283,348 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.17 h1:yFHH5bghP9ij5Y34PPaMOE8g//oXZ0uJQeMENVo2zcI=
-github.com/Microsoft/hcsshim v0.8.17/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.7/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
+github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
+github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
+github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I=
+github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
+github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY=
+github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
+github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE=
+github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
+github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
+github.com/alecthomas/participle/v2 v2.0.0/go.mod h1:rAKZdJldHu8084ojcWevWAL8KmEU+AT+Olodb+WoN2Y=
+github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c=
+github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
+github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw=
+github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY=
+github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA=
+github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o=
+github.com/apache/beam v2.32.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o=
+github.com/apache/beam/sdks/v2 v2.47.0-RC3/go.mod h1:n8ybxT4OltLeoykTV8CGg/xQ1yrUJw13aJRkQY36lE8=
+github.com/apache/beam/sdks/v2 v2.52.0/go.mod h1:fSZrtv7H4/Z8FAveEYA9uSUVKwZR9GsGsLjGWP0IXZc=
+github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q=
+github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ=
+github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.43.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.156/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.257/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.46.4/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.50.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
+github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw=
+github.com/aws/aws-sdk-go-v2 v1.16.11/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo=
+github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.20.1/go.mod h1:NU06lETsFm8fUC6ZjhgDpVBcGZTFQ6XM+LZWZxMI4ac=
+github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
+github.com/aws/aws-sdk-go-v2 v1.22.0/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
+github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
+github.com/aws/aws-sdk-go-v2 v1.25.3/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
+github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
+github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.3/go.mod h1:gNsR5CaXKmQSSzrmGxmwmct/r+ZBfbxorAuXYsj/M5Y=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14/go.mod h1:9NCTOURS8OpxvoAVHq79LK81/zC78hfRWFn+aL0SPcY=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
+github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA=
+github.com/aws/aws-sdk-go-v2/config v1.17.1/go.mod h1:uOxDHjBemNTF2Zos+fgG0NNfE86wn1OAHDTGxjMEYi0=
+github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
+github.com/aws/aws-sdk-go-v2/config v1.18.23/go.mod h1:rx0ruaQ+gk3OrLFHRRx56lA//XxP8K8uPzeNiKNuWVY=
+github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
+github.com/aws/aws-sdk-go-v2/config v1.19.0/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
+github.com/aws/aws-sdk-go-v2/config v1.20.0/go.mod h1:7+1riCZXyT+sAGvneR5j+Zl1GyfbBUNQurpQTE6FP6k=
+github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
+github.com/aws/aws-sdk-go-v2/config v1.27.7/go.mod h1:PH0/cNpoMO+B04qET699o5W92Ca79fVtbUnvMIZro4I=
+github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
+github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c=
+github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.14/go.mod h1:opAndTyq+YN7IpVG57z2CeNuXSQMqTYxGGlYH0m0RMY=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.22/go.mod h1:BfNcm6A9nSd+bzejDcMJ5RE+k6WbkCwWkQil7q4heRk=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
+github.com/aws/aws-sdk-go-v2/credentials v1.14.0/go.mod h1:q/3oaTPlamrQWHPwJe56Mjq9g1TYDgddvgTgWJtHTmE=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.7/go.mod h1:UQi7LMR0Vhvs+44w5ec8Q+VS+cd10cjwgHwiVkE0YGU=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.12/go.mod h1:aZ4vZnyUuxedC7eD4JyEHpGnCz+O2sHQEx3VvAwklSE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.0/go.mod h1:c28nJNzMVVb9TQpZ5q4tzZvwEJwf/7So7Ie2s90l1Fw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3/go.mod h1:/fYB+FZbDlwlAiynK9KDXlzZl3ANI9JkD0Uhz5FjNT4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.3.2/go.mod h1:qaqQiHSrOUVOfKe6fhgQ6UzhxjwqVW8aHNegd6Ws4w4=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.60/go.mod h1:HLWzCoNyzaPkOOs9yZ3muJ91lSk8O9DJbJw5aKAWWHY=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.91/go.mod h1:ACQ6ta5YFlfSOz2c9A+EVYawLxFMZ0rI3Q0A0tGieKo=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.18/go.mod h1:348MLhzV1GSlZSMusdwQpXKbhD7X2gbI/TxwAPKkYZQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.0/go.mod h1:F6MXWETIeetAHwFHyoHEqrcB3NpijFv9nLP5h9CXtT0=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3/go.mod h1:oQZXg3c6SNeY6OZrDY+xHcF4VGIEoNotX2B4PrDeoJI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.12/go.mod h1:ckaCVTEdGAxO6KwTGzgskxR1xM+iJW4lxMyDFVda2Fc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.0/go.mod h1:Jg8XVv5M2V2wiAMvBFx+O59jg6Yr8vhP0bgNF/IuquM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3/go.mod h1:vCKrdLXtybdf/uQd/YfVR2r5pcbNuEYKzMQpcxmeSJw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.19/go.mod h1:cVHo8KTuHjShb9V8/VjH3S/8+xPu16qx8fdGwmotJhE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.4.0/go.mod h1:d9YrBHJhyzDCv5UsEVRizHlFV6Q0sLemFq6uxuqWfUw=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.5/go.mod h1:aIwFF3dUk95ocCcA3zfk3nhz0oLkpzHFWuMp8l/4nNs=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9/go.mod h1:z9VXZsWA2BvZNH1dT0ToUYwMu/CR9Skkj/TBX+mceZw=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.1/go.mod h1:v33JQ57i2nekYTA70Mb+O18KeH4KqhdqxTJZNK1zdRE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.3/go.mod h1:gkb2qADY+OHaGLKNTYxMaQNacfeyQpZ4csDTQMeFmcw=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.9/go.mod h1:EF5RLnD9l0xvEWwMRcktIS/dI6lF8lU5eV3B13k6sWo=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11/go.mod h1:5jHR79Tv+Ccq6rwYh+W7Nptmw++WiFafMfR42XhwNl8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.12/go.mod h1:1TODGhheLWjpQWSuhYuAUWYTCKwEjx2iblIFKDHjeTc=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.0/go.mod h1:RsPWWy7u/hwmFX57sQ7MLvrvJeYyNkiMm5BaavpoU18=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5/go.mod h1:cl9HGLV66EnCmMNzq4sYOti+/xo8w34CsgzVtm2GgsY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.1/go.mod h1:6EQZIwNNvHpq/2/QSJnp4+ECvqIy55w95Ofs0ze+nGQ=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.8/go.mod h1:JlVwmWtT/1c5W+6oUsjXjAJ0iJZ+hlghdrDy/8JxGCU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9/go.mod h1:9TzXX3MehQNGPwCZ3ka4CpwQsoAMWSF48/b+De9rfVM=
+github.com/aws/aws-sdk-go-v2/service/kms v1.21.1/go.mod h1:EEfb4gfSphdVpRo5sGf2W3KvJbelYUno5VaXR5MJ3z4=
+github.com/aws/aws-sdk-go-v2/service/kms v1.27.9/go.mod h1:2tFmR7fQnOdQlM2ZCEPpFnBIQD1U8wmXmduBgZbOag0=
+github.com/aws/aws-sdk-go-v2/service/kms v1.29.2/go.mod h1:elLDaj+1RNl9Ovn3dB6dWLVo5WQ+VLSUMKegl7N96fY=
+github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
+github.com/aws/aws-sdk-go-v2/service/kms v1.35.3/go.mod h1:gjDP16zn+WWalyaUqwCCioQ8gU8lzttCCc9jYsiQI/8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.11.1/go.mod h1:XLAGFrEjbvMCLvAtWLLP32yTv8GpBquCApZEycDLunI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.27.1/go.mod h1:NffjpNsMUFXp6Ok/PahrktAncoekWrywvmIK83Q2raE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.1/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.40.2/go.mod h1:Zjfqt7KhQK+PO1bbOsFNzKgaq7TcxzmEoDWN8lM0qzQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1/go.mod h1:hWjsYGjVuqCgfoveVcVFPXIWgz0aByzwaxKlN1StKcM=
+github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.2/go.mod h1:GvNHKQAAOSKjmlccE/+Ww2gDbwYP9EewIuvWiQSquQs=
+github.com/aws/aws-sdk-go-v2/service/sns v1.29.2/go.mod h1:ZIs7/BaYel9NODoYa8PW39o15SFAXDEb4DxOG2It15U=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.31.2/go.mod h1:J3XhTE+VsY1jDsdDY+ACFAppZj/gpvygzC5JE0bTLbQ=
+github.com/aws/aws-sdk-go-v2/service/ssm v1.49.2/go.mod h1:loBAHYxz7JyucJvq4xuW9vunu8iCzjNYfSrQg2QEczA=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.17/go.mod h1:mS5xqLZc/6kc06IpXn5vRxdLaED+jEuaSRv5BxtnsiY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.16.0/go.mod h1:O7B5cpuhhJKefAKkM7onb0McmpHyKnsH4RrHJhOyq7M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.2/go.mod h1:Vv9Xyk1KMHXrR3vNQe8W5LMFdTjSeWk0gBZBzvf3Qa0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.18.0/go.mod h1:Td8EvzggonY02wLaqSpwybI3GbmA0PWoprKGil2uwJg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2/go.mod h1:JYzLoEVeLXk+L4tn1+rrkfhkxl6mLDEVaDSvGq9og90=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.13/go.mod h1:Ru3QVMLygVs/07UQ3YDur1AQZZp2tUNje8wfloFttC0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.11/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.24.0/go.mod h1:HnCUMNz2XqwnEEk5X6oeDYB2HgOLFpJ/LyfilN8WErs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.4/go.mod h1:+K1rNPVyGxkRuv9NNiaZ4YhBFuyw2MMA9SlIJ1Zlpz8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
+github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.14.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
+github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
+github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
+github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
+github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -89,37 +2632,142 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE=
+github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
+github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
+github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
+github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw=
+github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
+github.com/cavaliercoder/go-rpm v0.0.0-20200122174316-8cb9fd9c31a8/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY=
+github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
+github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chainguard-dev/clog v1.4.0/go.mod h1:cV516KZWqYc/phZsCNwF36u/KMGS+Gj5Uqeb8Hlp95Y=
+github.com/chainguard-dev/slogctx v1.2.2/go.mod h1:+/TwogApSQPC+Umn9LF/T6my0KDml1XDAgpuSN/nwU4=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/cli/browser v1.2.0/go.mod h1:xFFnXLVcAyW9ni0cuo6NnrbCP75JxJ0RO7VtCBiH/oI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20231016030527-8bd2eac9fb4a/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.3.3/go.mod h1:1wNJ45eSXW9AnOc3skntW9ZUZz6gxrQK3cOj3rK+BC8=
+github.com/cockroachdb/cockroach-go/v2 v2.3.5/go.mod h1:1wNJ45eSXW9AnOc3skntW9ZUZz6gxrQK3cOj3rK+BC8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
+github.com/container-orchestrated-devices/container-device-interface v0.5.4/go.mod h1:DjE95rfPiiSmG7uVXtg0z6MnPm/Lx4wxKCIts0ZE0vg=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -127,19 +2775,29 @@ github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -148,12 +2806,25 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1 h1:xWHPAoe6VkUiI9GAvndJM7s/0MTrmwX3AQiYTr3olf0=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
+github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
+github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
+github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY=
+github.com/containerd/containerd v1.6.23/go.mod h1:UrQOiyzrLi3n4aezYJbQH6Il+YzTvnHFbEuO3yfDrM4=
+github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4=
+github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g=
+github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
+github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -161,14 +2832,27 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
+github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
+github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
+github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
+github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -178,169 +2862,650 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
+github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k=
+github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.3.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI=
+github.com/containerd/nri v0.6.1/go.mod h1:7+sX3wNx+LR7RzhjnJiUkFDhn18P5Bg/0VnJ/uXpRJM=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/containerd/protobuild v0.3.0/go.mod h1:5mNMFKKAwCIAkFBPiOdtRx2KiQlyEJeMXnL5R1DsWu8=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
+github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
+github.com/containerd/ttrpc v1.2.3/go.mod h1:ieWsXucbb8Mj9PH0rXCw1i8IunRbbAiDkpXkbfflWBM=
+github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/image/v5 v5.12.0 h1:1hNS2QkzFQ4lH3GYQLyAXB0acRMhS1Ubm6oV++8vw4w=
-github.com/containers/image/v5 v5.12.0/go.mod h1:VasTuHmOw+uD0oHCfApQcMO2+36SfyncoSahU7513Xs=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
+github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
+github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE=
+github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E=
-github.com/containers/storage v1.32.1 h1:JgvHY5dokiff+Ee4TdvPYO++Oq2BAave5DmyPetH2iU=
-github.com/containers/storage v1.32.1/go.mod h1:do6oIF71kfkVS3CPUZr+6He94fIaj6pzF8ywevPuuOw=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
+github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/LFMtH0FIRt34=
+github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
+github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
+github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
+github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI=
+github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
+github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
+github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
+github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
+github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
+github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
+github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
+github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
+github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms=
+github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM=
+github.com/dave/courtney v0.3.0/go.mod h1:BAv3hA06AYfNUjfjQr+5gc6vxeBVOupLqrColj+QSD8=
+github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
+github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk=
+github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
+github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc=
+github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
+github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/digitalocean/godo v1.78.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs=
+github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
+github.com/digitalocean/godo v1.108.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
+github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
-github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/eggsampler/acme/v3 v3.3.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
+github.com/eggsampler/acme/v3 v3.4.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
+github.com/eggsampler/acme/v3 v3.6.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
+github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
+github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
+github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
+github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
+github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsouza/fake-gcs-server v1.47.6/go.mod h1:ApSXKexpG1BUXJ4f2tNCxvhTKwCPFqFLBDW2UNQDODE=
+github.com/fsouza/slognil v0.3.1/go.mod h1:Q8sD2VtWIDCZxQCLRKFwfRYu5/jU5pqplclsz4fQKF4=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
+github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o=
+github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
+github.com/fullstorydev/grpcurl v1.8.2/go.mod h1:YvWNT3xRp2KIRuvCphFodG0fKkMXwaxA9CJgKCcyzUQ=
+github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E=
+github.com/fullstorydev/grpcurl v1.8.9/go.mod h1:PNNKevV5VNAV2loscyLISrEnWQI61eqR0F8l3bVadAA=
+github.com/fullstorydev/grpcurl v1.9.1/go.mod h1:i8gKLIC6s93WdU3LSmkE5vtsCxyRmihUj5FK1cNW5EM=
+github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/glebarez/go-sqlite v1.21.1/go.mod h1:ISs8MF6yk5cL4n/43rSOmVMGJJjHYr7L2MbZZ5Q4E2E=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gorp/gorp v2.0.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E=
+github.com/go-gorp/gorp v2.2.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E=
+github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-ini/ini v1.66.6/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
+github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs=
+github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
+github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-redis/redismock/v9 v9.2.0/go.mod h1:18KHfGDK4Y6c2R0H38EUGWAdc7ZQS9gfYxc94k7rWT0=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
+github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
+github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
+github.com/go-rod/rod v0.112.9/go.mod h1:l0or0gEnZ7E5C0L/W7iD+yXBnm/OM3avP1ji74k8N9s=
+github.com/go-rod/rod v0.114.7/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.8.0/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/goadesign/goa v2.2.5+incompatible/go.mod h1:d/9lpuZBK7HFi/7O0oXfwvdoIl+nx2bwKqctZe/lQao=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/attrs v1.0.2/go.mod h1:tJ7wJj6XbMNhYwJ8fl2PFDpDcUfsG1spWdUJISvPAZQ=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.10.1/go.mod h1:AWx4++KnNOW3JOeEvhSaq+mvgAvnMYOY1XSIin4Mago=
+github.com/gobuffalo/fizz v1.14.2/go.mod h1:pZp2NZYEiPRoylV3lKIz0XZOOupizz+SnKq9wb1idxE=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.2.5/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/genny/v2 v2.0.12/go.mod h1:KtMtTcR/U2kHbQxhjCVA16ph6rjBnhw39f6aaxl4hMk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/github_flavored_markdown v1.1.1/go.mod h1:yU32Pen+eorS58oxh/bNZx76zUOCJwmvyV5FBrvzOKQ=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/helpers v0.6.5/go.mod h1:LA4zcc89tkZsfKpJIWsXLibiqTgZQ4EvDszfxdqr9ZA=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/nulls v0.4.1/go.mod h1:pp8e1hWTRJZFpMl4fj/CVbSMlaxjeGKkFq4RuBZi3w8=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/plush/v4 v4.1.13/go.mod h1:s3hUyj/JlwEiJ039OBJevojq9xT40D1pgekw0o88CVU=
+github.com/gobuffalo/pop/v6 v6.0.6/go.mod h1:toTxNJnsSuSlyK6w0yGb4YXSNIHsi2chQYC2CjBF9Ac=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gobuffalo/tags/v3 v3.1.3/go.mod h1:WAAjKdskZUmdi6EkNjP2SXBwBwRovHsjJsPJbBiPlKc=
+github.com/gobuffalo/validate/v3 v3.3.2/go.mod h1:jiEEw+N7KbAP2aInFxGnfitI0g7HjXqcp5hDD6TaQDU=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
+github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE=
+github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
+github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -356,215 +3521,981 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
+github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ=
+github.com/google/certificate-transparency-go v1.1.2-0.20210512142713-bed466244fa6/go.mod h1:aF2dp7Dh81mY8Y/zpzyXps4fQW5zQbDu2CxfpJB6NkI=
+github.com/google/certificate-transparency-go v1.1.2/go.mod h1:3OL+HKDqHPUfdKrHVQxO6T8nDLO0HF7LRTlkIWXaWvQ=
+github.com/google/certificate-transparency-go v1.1.6/go.mod h1:0OJjOsOk+wj6aYQgP7FU0ioQ0AJUmnWPFMqTjQeazPQ=
+github.com/google/certificate-transparency-go v1.2.1/go.mod h1:bvn/ytAccv+I6+DGkqpvSsEdiVGramgaSC6RD3tEmeE=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-attestation v0.3.2/go.mod h1:N0ADdnY0cr7eLJyZ75o8kofGGTUF2XrZTJuTPo5acwk=
+github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9/go.mod h1:KDsPHk8a2MX9g20kYSdxB21t7je5NghSaFeVn0Zu3Ao=
+github.com/google/go-attestation v0.4.4-0.20230613144338-a9b6eb1eb888/go.mod h1:xCfWZojUHwedNcs780T8cblW9XHss9XKD2s3U44FVbo=
+github.com/google/go-attestation v0.5.0/go.mod h1:0Tik9y3rzV649Jcr7evbljQHQAsIlJucyqQjYDBqktU=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-configfs-tsm v0.2.2/go.mod h1:EL1GTDFMb5PZQWDviGfZV9n87WeGTR/JUg13RfwkgRo=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
+github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
+github.com/google/go-github/v50 v50.2.0/go.mod h1:VBY8FB6yPIjrtKhozXv4FQupxKLS6H4m6xFZlT43q8Q=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
+github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
+github.com/google/go-licenses v1.6.0/go.mod h1:Z8jgz2isEhdenOqd/00pq7I4y4k1xVVQJv415otjclo=
+github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
+github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
+github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
+github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
+github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks=
+github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
+github.com/google/go-sev-guest v0.4.1/go.mod h1:UEi9uwoPbLdKGl1QHaq1G8pfCbQ4QP0swWX4J0k6r+Q=
+github.com/google/go-sev-guest v0.6.1/go.mod h1:UEi9uwoPbLdKGl1QHaq1G8pfCbQ4QP0swWX4J0k6r+Q=
+github.com/google/go-sev-guest v0.8.0/go.mod h1:hc1R4R6f8+NcJwITs0L90fYWTsBpd1Ix+Gur15sqHDs=
+github.com/google/go-sev-guest v0.9.3/go.mod h1:hc1R4R6f8+NcJwITs0L90fYWTsBpd1Ix+Gur15sqHDs=
+github.com/google/go-tdx-guest v0.3.1/go.mod h1:/rc3d7rnPykOPuY8U9saMyEps0PZDThLk/RygXm04nE=
+github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
+github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw=
+github.com/google/go-tpm v0.3.2/go.mod h1:j71sMBTfp3X5jPHz852ZOfQMUOf65Gb/Th8pRmp7fvg=
+github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4=
+github.com/google/go-tpm v0.3.4-0.20230613064043-511507721cb1/go.mod h1:Yj9bYgsIKoza8oMlxZqvqgUIDKFaExnuLaDdOtFCwG4=
+github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
+github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0=
+github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4=
+github.com/google/go-tpm-tools v0.2.1/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4=
+github.com/google/go-tpm-tools v0.3.1/go.mod h1:PSg+r5hSZI5tP3X7LBQx2sW1VSZUqZHBSrKyDqrB21U=
+github.com/google/go-tpm-tools v0.3.10/go.mod h1:HQfQboO+M8pRtBfO5U3KMhwzfC/XC3TaMCgRfTpII8Q=
+github.com/google/go-tpm-tools v0.3.12/go.mod h1:2OtmyPGPuaWWIOjr+IDhNQb6t5njjbSmZtzc350Q6Ro=
+github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba/go.mod h1:EFYHy8/1y2KfgTAsx7Luu7NGhoxtuVHnNo8jE7FikKc=
+github.com/google/go-tpm-tools v0.4.3/go.mod h1:T8jXkp2s+eltnCDIsXR84/MTcVU9Ja7bh3Mit0pa4AY=
+github.com/google/go-tpm-tools v0.4.4/go.mod h1:T8jXkp2s+eltnCDIsXR84/MTcVU9Ja7bh3Mit0pa4AY=
+github.com/google/go-tspi v0.2.1-0.20190423175329-115dea689aad/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI=
+github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
+github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148/go.mod h1:rq9F0RSpNKlrefnf6ZYMHKUnEJBCNzf6AcCXMYBeYvE=
+github.com/google/logger v1.1.1/go.mod h1:BkeJZ+1FhQ+/d087r4dzojEg1u2ZX+ZqG1jTUrLM+zQ=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
+github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8=
+github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
+github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
+github.com/google/rpmpack v0.6.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM=
+github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
+github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ=
+github.com/google/trillian v1.3.14-0.20210511103300-67b5f349eefa/go.mod h1:s4jO3Ai4NSvxucdvqUHON0bCqJyoya32eNw6XJwsmNc=
+github.com/google/trillian v1.4.0/go.mod h1:1Bja2nEgMDlEJWWRXBUemSPG9qYw84ZYX2gHRVHlR+g=
+github.com/google/trillian v1.5.2/go.mod h1:H8vOoa2dxd3xCdMzOOwt9kIz/3MSoJhcqLJGG8iRwbg=
+github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI=
+github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
+github.com/googleapis/cloud-bigtable-clients-test v0.0.0-20221104150409-300c96f7b1f5/go.mod h1:Udm7et5Lt9Xtzd4n07/kKP80IdlR4zVDjtlUZEO2Dd8=
+github.com/googleapis/cloud-bigtable-clients-test v0.0.0-20230505150253-16eeee810d3a/go.mod h1:2n/InOx7Q1jaqXZJ0poJmsZxb6K+OfHEbhA/+LPJrII=
+github.com/googleapis/cloud-bigtable-clients-test v0.0.2/go.mod h1:mk3CrkrouRgtnhID6UZQDK3DrFFa7cYCAJcEmNsHYrY=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
+github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
+github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXGbreo66NmE+3X3WQ=
+github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
+github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.21.0/go.mod h1:nCLIt0w3Ept2NwF8ThLmrppXsfT07oC8k0XNDxd8sVU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
+github.com/hamba/avro/v2 v2.17.2/go.mod h1:Q9YK+qxAhtVrNqOhwlZTATLgLA8qxG2vtvkhK8fJ7Jo=
+github.com/hanwen/go-fuse/v2 v2.5.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
+github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo=
+github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg=
+github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g=
+github.com/hashicorp/consul/api v1.27.0/go.mod h1:JkekNRSou9lANFdt+4IKx3Za7XY0JzzpQjEb4Ivo1c8=
+github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0=
+github.com/hashicorp/consul/sdk v0.14.0/go.mod h1:gHYeuDa0+0qRAD6Wwr6yznMBvBwHKoxSBoW5l73+saE=
+github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg=
+github.com/hashicorp/consul/sdk v0.15.1/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
+github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
+github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.6/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg=
+github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE=
+github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
+github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM=
+github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs=
+github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE=
+github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk=
+github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
+github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA=
+github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q=
+github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/honeycombio/beeline-go v1.10.0/go.mod h1:Zz5WMeQCJzFt2Mvf8t6HC1X8RLskLVR/e8rvcmXB1G8=
+github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
+github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
+github.com/ianlancetaylor/demangle v0.0.0-20220517205856-0058ec4f073c/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
+github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
+github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
+github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
+github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
+github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI=
+github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ=
+github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
+github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk=
+github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels=
+github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
+github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU=
+github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4=
+github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.4.1-0.20181029123624-5de817a9aa20/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
+github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
+github.com/jhump/protoreflect v1.9.0/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
+github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
+github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
+github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k=
+github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8=
github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w=
github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmattheis/goverter v1.3.0/go.mod h1:Il/E+0riIfIgRBUpM+Fnh2s8/sJhMp5NeDZZenTd6S4=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/johannesboyne/gofakes3 v0.0.0-20221110173912-32fb85c5aed6/go.mod h1:LIAXxPvcUXwOcTIj9LSNSUpE9/eMHalTWxsP/kmWxQI=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
+github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
+github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
+github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
+github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
+github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
+github.com/lestrrat-go/jwx v1.2.28/go.mod h1:nF+91HEMh/MYFVwKPl5HHsBGMPscqbQb+8IDQdIazP8=
+github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/letsencrypt/borp v0.0.0-20230707160741-6cc6ce580243/go.mod h1:podMDq5wDu2ZO6JMKYQcjD3QdqOfNLWtP2RDSy8CHUU=
+github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
+github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
+github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc=
+github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
+github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158/go.mod h1:ZFNBS3H6OEsprCRjscty6GCBe5ZiX44x6qY4s7+bDX0=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
+github.com/linkedin/goavro/v2 v2.12.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk=
+github.com/linode/linodego v1.4.0/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8=
+github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk=
+github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ=
+github.com/linode/linodego v1.27.1/go.mod h1:5oAsx+uinHtVo6U77nXXXtox7MWzUW6aEkTOKXxA9uo=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo=
-github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
-github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
-github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
+github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
+github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.7.0/go.mod h1:RVP6/F85JyxTrbJxWIdKU2vlSvK48iCMnMXRkSz7xtg=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
+github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
+github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U=
+github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
+github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
+github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
+github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
+github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
+github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
-github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
+github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
+github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
+github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
+github.com/nats-io/nats.go v1.34.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
+github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
+github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
+github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
+github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
+github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
+github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
+github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk=
+github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
+github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
+github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
+github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -573,68 +4504,229 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
-github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0=
-github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w=
+github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
+github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA=
-github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
-github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
+github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
+github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
+github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA=
+github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M=
+github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o=
+github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI=
+github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
+github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
+github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
+github.com/poy/onpar v0.3.1/go.mod h1:WJueGwprgKVeX1mfOJfCxEJNbyhYiY0lWw2i18UaEi0=
+github.com/poy/onpar v0.3.2/go.mod h1:6XDWG8DJ1HsFX6/Btn0pHl3Jz5d1SEEGNZ5N1gtYo+I=
+github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
+github.com/poy/onpar/v3 v3.0.0-20230319125606-a35e84953e8f/go.mod h1:nJ2Lyw+ezwTJHxo7CRPqV2o/3aA3fX6fQHsDNLT934g=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
-github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
+github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w=
+github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec=
+github.com/prometheus/common v0.41.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
+github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
+github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
+github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
+github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
+github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
+github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec=
+github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY=
+github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -643,156 +4735,916 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/prometheus v0.35.0/go.mod h1:7HaLx5kEPKJ0GDgbODG0fZgXbQ8K/XjZNJXQmbmgQlY=
+github.com/prometheus/prometheus v0.43.1/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
+github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M=
+github.com/prometheus/prometheus v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/proullon/ramsql v0.0.0-20211120092837-c8d0a408b939/go.mod h1:jG8oAQG0ZPHPyxg5QlMERS31airDC+ZuqiAe8DUvFVo=
+github.com/proullon/ramsql v0.1.3/go.mod h1:CFGqeQHQpdRfWqYmWD3yXqPTEaHkF4zgXy1C6qDWc9E=
+github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
+github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg=
+github.com/pseudomuto/protoc-gen-doc v1.5.0/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg=
+github.com/pseudomuto/protoc-gen-doc v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM=
+github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
+github.com/qur/ar v0.0.0-20130629153254-282534b91770/go.mod h1:SjlYv2m9lpV0UW6K7lDqVJwEIIvSjaHbGk7nIfY8Hxw=
+github.com/rakyll/embedmd v0.0.0-20171029212350-c8060a0752a2/go.mod h1:7jOTMgqac46PZcF54q6l2hkLEG8op93fZu61KmxWDV4=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
+github.com/redis/go-redis/v9 v9.2.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/redis/go-redis/v9 v9.3.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
+github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.2.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ=
+github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
+github.com/sagikazarmark/crypt v0.17.0/go.mod h1:SMtHTvdmsZMuY/bpZoqokSoChIrcJ/epOxZN58PbZDg=
+github.com/sagikazarmark/crypt v0.19.0/go.mod h1:c6vimRziqqERhtSe0MhIvzE1w54FrCHtrXb5NH/ja78=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I=
+github.com/sassoftware/go-rpmutils v0.3.0/go.mod h1:hM9wdxFsjUFR/tJ6SMsLrJuChcucCa0DsCzE9RMfwMo=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
+github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
+github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
+github.com/shirou/gopsutil/v3 v3.23.8/go.mod h1:7hmCaBn+2ZwaZOr6jmPBZDfawwMGuo1id3C6aM8EDqQ=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.2/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shoenig/test v0.6.6/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
+github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
+github.com/sigstore/protobuf-specs v0.3.0/go.mod h1:ynKzXpqr3dUj2Xk9O/5ZUhjnpi0F53DNi5AdH6pS3jc=
+github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
+github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
+github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
+github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
+github.com/sigstore/sigstore v1.8.8/go.mod h1:GW0GgJSCTBJY3fUOuGDHeFWcD++c4G8Y9K015pwcpDI=
+github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
+github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.1/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc=
+github.com/smallstep/go-attestation v0.4.4-0.20240109183208-413678f90935/go.mod h1:vNAduivU014fubg6ewygkAvQC0IQVXqdc8vaGl/0er4=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/spiffe/go-spiffe/v2 v2.1.3/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk=
+github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY=
+github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg=
+github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
+github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
+github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
+github.com/testcontainers/testcontainers-go v0.19.0/go.mod h1:3YsSoxK0rGEUzbGD4gUVt1Nm3GJpCIq94GX+2LSf3d4=
+github.com/testcontainers/testcontainers-go v0.25.0/go.mod h1:4sC9SiJyzD1XFi59q8umTQYWxnkweEc5OjVtTUlJzqQ=
+github.com/tetratelabs/wazero v1.0.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ=
+github.com/tetratelabs/wazero v1.5.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
+github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
+github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
+github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
+github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
-github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
-github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.7/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
+github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dmRIc0=
+github.com/uwu-tools/magex v0.10.0/go.mod h1:TrSEhrL1xHfJVy6n05AUwFdcQndgwrbgL5ybPNKWmVY=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
+github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ=
+github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk=
+github.com/vburenin/ifacemaker v1.2.1/go.mod h1:5WqrzX2aD7/hi+okBjcaEQJMg4lDGrpuEX3B8L4Wgrs=
+github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
+github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
+github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
+github.com/veraison/go-cose v1.2.1/go.mod h1:t6V8WJzHm1PD5HNsuDjW3KLv577uWb6UTzbZGvdQHD8=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
+github.com/weppos/publicsuffix-go v0.12.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
+github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
+github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
+github.com/weppos/publicsuffix-go v0.20.1-0.20221031080346-e4081aa8a6de/go.mod h1:g9GsAxnaxsUuTLZcQdYbi43vT2k9ubZGHsdCy819VLk=
+github.com/weppos/publicsuffix-go v0.30.0/go.mod h1:kBi8zwYnR0zrbm8RcuN1o9Fzgpnnn+btVN8uWPMyXAY=
+github.com/weppos/publicsuffix-go v0.30.1-0.20230620154423-38c92ad2d5c6/go.mod h1:wdMq89hDN07Zqr0yqYAXIBTJXl4MEELx+HYHOZdf5gM=
+github.com/weppos/publicsuffix-go v0.30.2-0.20230730094716-a20f9abcc222/go.mod h1:s41lQh6dIsDWIC1OWh7ChWJXLH0zkJ9KHZVqA7vHyuQ=
+github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d/go.mod h1:vLdXKydr/OJssAXmjY0XBgLXUfivBMrNRIBljgtqCnw=
+github.com/weppos/publicsuffix-go/publicsuffix/generator v0.0.0-20220927085643-dc0d00c92642/go.mod h1:GHfoeIdZLdZmLjMlzBftbTDntahTttUMWjxZwQJhULE=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww=
+github.com/xitongsys/parquet-go v1.6.2/go.mod h1:IulAQyalCm0rPiZVNnCgm/PCL64X2tdSVGMQ/UeKqWA=
+github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA=
+github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE=
+github.com/xitongsys/parquet-go-source v0.0.0-20220315005136-aec0fe3e777c/go.mod h1:qLb2Itmdcp7KPa5KZKvhE9U1q5bYSOmgeOckF/H2rQA=
+github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/ysmood/fetchup v0.2.2/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/gop v0.0.2/go.mod h1:rr5z2z27oGEbyB787hpEcx4ab8cCiPnKxn0SUHt6xzk=
+github.com/ysmood/gop v0.2.0/go.mod h1:rr5z2z27oGEbyB787hpEcx4ab8cCiPnKxn0SUHt6xzk=
+github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
+github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
+github.com/zmap/zcertificate v0.0.1/go.mod h1:q0dlN54Jm4NVSSuzisusQY0hqDWvu92C+TWveAxiVWk=
+github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ=
+github.com/zmap/zcrypto v0.0.0-20201211161100-e54a5822fb7e/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ=
+github.com/zmap/zcrypto v0.0.0-20220402174210-599ec18ecbac/go.mod h1:egdRkzUylATvPkWMpebZbXhv0FMEMJGX/ur0D3Csk2s=
+github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
+github.com/zmap/zcrypto v0.0.0-20231219022726-a1f61fb1661c/go.mod h1:GSDpFDD4TASObxvfZfvpZZ3OWHIUHMlhVWlkOe4ewVk=
+github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8=
+github.com/zmap/zlint/v3 v3.4.0/go.mod h1:WgepL2QqxyMHnrOWJ54NqrgfMtOyuXr52wEE0tcfo9k=
+github.com/zmap/zlint/v3 v3.5.0/go.mod h1:JkNSrsDJ8F4VRtBZcYUQSvnWFL7utcjDIn+FE64mlBI=
+github.com/zmap/zlint/v3 v3.6.0/go.mod h1:NVgiIWssgzp0bNl8P4Gz94NHV2ep/4Jyj9V69uTmZyg=
+go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M=
+go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
+go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
+go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
+go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
+go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
+go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
+go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
+go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
+go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
+go.etcd.io/etcd/client/v2 v2.305.8/go.mod h1:ZlAsxDK5/10I6xVHhFo9zinCMr/DDLKFetDDXlzKwqE=
+go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
+go.etcd.io/etcd/client/v2 v2.305.11/go.mod h1:vX2j5tMynwOateY6BfVmLol3gYOIkbhqjs/BqRsdIOw=
+go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E=
+go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
+go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
+go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
+go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
+go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
+go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE=
+go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
+go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
+go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo=
+go.etcd.io/etcd/etcdctl/v3 v3.5.0/go.mod h1:vGTfKdsh87RI7kA2JHFBEGxjQEYx+pi299wqEOdi34M=
+go.etcd.io/etcd/etcdctl/v3 v3.5.8/go.mod h1:ur5LRmFUo1Rcnwe8i71YVtH5y4RjsW/6YNmTQlz1N8E=
+go.etcd.io/etcd/etcdctl/v3 v3.5.11/go.mod h1:pPQfzNI29HES8lmvUR4xO2vdMMnjUjdL/RI3n6xle+w=
+go.etcd.io/etcd/etcdctl/v3 v3.5.13/go.mod h1:+EKywV/K3xA/OIUWlTKzzhYMpepqQN+KZQ71F85YTuk=
+go.etcd.io/etcd/etcdutl/v3 v3.5.0/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg=
+go.etcd.io/etcd/etcdutl/v3 v3.5.8/go.mod h1:ttDqxIhpW9PqQdw+jJhUpLF+f37TnnSRJpkJSkCikts=
+go.etcd.io/etcd/etcdutl/v3 v3.5.11/go.mod h1:p7jTKqjyEjl/lyTzhHuQ5kqQt1FhQlRHw8TK90WjHQI=
+go.etcd.io/etcd/etcdutl/v3 v3.5.13/go.mod h1:2vhvTIQobP+Cb04qzlcbKGvX6J5oq/N1kquk1yCDIQY=
+go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE=
+go.etcd.io/etcd/pkg/v3 v3.5.8/go.mod h1:C17MJkZHJIyJV+wWWx6Jz6YS6BfdkOnUkSwT9uuEO7s=
+go.etcd.io/etcd/pkg/v3 v3.5.11/go.mod h1:bLfwo6YEgpOAMBZJsZg5AiSS+mxNTRJi15Dvp9kKW68=
+go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
+go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI=
+go.etcd.io/etcd/raft/v3 v3.5.8/go.mod h1:W6P5WxtOMfYNdLSEJX3vc8Pg6LOt+ewI9UCFKcnIexA=
+go.etcd.io/etcd/raft/v3 v3.5.11/go.mod h1:Tp7kZJVtWJWLiMCPrgkimiOB5ZYi8YM93onQihpG724=
+go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
+go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc=
+go.etcd.io/etcd/server/v3 v3.5.8/go.mod h1:1y2ahPOrlE4pzVV5+rBCDur7QQcEP0MNUPO2dyzwjso=
+go.etcd.io/etcd/server/v3 v3.5.11/go.mod h1:CS0+TwcuRlhg1I5CpA3YlisOcoqJB1h1GMRgje75uDs=
+go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
+go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30=
+go.etcd.io/etcd/tests/v3 v3.5.0/go.mod h1:f+mtZ1bE1YPvgKdOJV2BKy4JQW0nAFnQehgOE7+WyJE=
+go.etcd.io/etcd/tests/v3 v3.5.8/go.mod h1:KFgEgZWwXZILKsOX3f3VyGmUHQgbG/yM9edQNTkXsdE=
+go.etcd.io/etcd/tests/v3 v3.5.11/go.mod h1:XZ38aoIXYjFUDbltGqhZtnMQ6PGWn90sozJ1mZzhrCM=
+go.etcd.io/etcd/tests/v3 v3.5.13/go.mod h1:7oiEl6JJWXvTuZP86ghSKZ7z1llzGR1g7PntZ7C0GHU=
+go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To=
+go.etcd.io/etcd/v3 v3.5.0/go.mod h1:FldM0/VzcxYWLvWx1sdA7ghKw7C3L2DvUTzGrcEtsC4=
+go.etcd.io/etcd/v3 v3.5.8/go.mod h1:vptUbt0q/yZF9X3xyZ0Q7XHoYJ8okR64FCRpZjz4Zyg=
+go.etcd.io/etcd/v3 v3.5.11/go.mod h1:m0bASlgry4RtOxY5i+VJ/VmxdMav0/ShPaMF38/6lJM=
+go.etcd.io/etcd/v3 v3.5.13/go.mod h1:S5/hQwdeJgz8vHxHi3IF4c74jrCC8d5EethiCuLNeis=
+go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
+go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
+go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
+go.mongodb.org/mongo-driver v1.11.0/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
+go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
+go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
+go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
+go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/collector/featuregate v1.0.1/go.mod h1:QQXjP4etmJQhkQ20j4P/rapWuItYxoFozg/iIwuKnYg=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
+go.opentelemetry.io/collector/pdata v1.0.1/go.mod h1:jutXeu0QOXYY8wcZ/hege+YAnSBP3+jpTqYU1+JTI5Y=
+go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
+go.opentelemetry.io/collector/semconv v0.93.0/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.0/go.mod h1:YjmsSWM1VTcWXFSgyrmLADPMZZohioz9onjgkikk59w=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.41.0/go.mod h1:xmv4aGDeCpkNeyGH0iKgaj/E6XPeRqG20QF2IC7UXr0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ=
+go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ=
+go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
+go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
+go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
+go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
+go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
+go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
+go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.1/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.0/go.mod h1:uOTV75+LOzV+ODmL8ahRLWkFA3eQcSC2aAsbxIu4duk=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.1/go.mod h1:YJ/JbY5ag/tSQFXzH3mtDmHqzF3aFn3DI/aB1n7pt4w=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.0/go.mod h1:pvkFJxNUXyJ5i8u6m8NIcqkoOf/65VM2mSyBbBJfeVQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.1/go.mod h1:UJJXJj0rltNIemDMwkOJyggsvyMG9QHfJeFH0HS5JjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.0/go.mod h1:RPagkaZrpwD+rSwQjzos6rBLsHOvenOqufCj4/7I46E=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.1/go.mod h1:DAKwdo06hFLc0U88O10x4xnb5sc7dDRDqRuiN+io8JE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw=
+go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
+go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
+go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
+go.opentelemetry.io/otel/metric v0.38.0/go.mod h1:uAtxN5hl8aXh5irD8afBtSwQU5Zjg64WWSz6KheZxBg=
+go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
+go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
+go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
+go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E=
+go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
+go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
+go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
+go.opentelemetry.io/otel/sdk v1.15.0/go.mod h1:XDEMrYWzJ4YlC17i6Luih2lwDw2j6G0PkUfr1ZqE+rQ=
+go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
+go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
+go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE=
+go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0=
+go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
+go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
+go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
+go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
+go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
+go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
+go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
+go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+go.opentelemetry.io/proto/otlp v0.12.1/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ=
+go.step.sm/crypto v0.51.1/go.mod h1:PdrhttNU/tG9/YsVd4fdlysBN+UV503p0o2irFZQlAw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
+go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+goa.design/goa v2.2.5+incompatible/go.mod h1:NnzBwdNktihbNek+pPiFMQP9PPFsUt8MMPPyo9opDSo=
+gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
+gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco=
+goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
+golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/exp v0.0.0-20220328175248-053ad81199eb/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20230807204917-050eac23e9de/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -803,6 +5655,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -811,16 +5665,42 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -830,10 +5710,15 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -841,30 +5726,171 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220921155015-db77216a4ee9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220926192436-02166a98028e/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
+golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
+golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
+golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -873,36 +5899,53 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -912,70 +5955,273 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210316092937-0b90fd5c4c48/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -983,41 +6229,238 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
+golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
+golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
+golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA=
+google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
+google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E=
+google.golang.org/api v0.121.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
+google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
+google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
+google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk=
+google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU=
+google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI=
+google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
+google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
+google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
+google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g=
+google.golang.org/api v0.160.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
+google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
+google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
+google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o=
+google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o=
+google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
+google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
+google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
+google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8=
+google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o=
+google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
+google.golang.org/api v0.175.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8=
+google.golang.org/api v0.176.1/go.mod h1:j2MaSDYcvYV1lkZ1+SMW4IeF90SrEyFA+tluDYWRrFg=
+google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw=
+google.golang.org/api v0.178.0/go.mod h1:84/k2v8DFpDRebpGcooklv/lais3MEfqpaBLA12gl2U=
+google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE=
+google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM=
+google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
+google.golang.org/api v0.184.0/go.mod h1:CeDTtUEiYENAf8PPG5VZW2yNp2VM3VWbCeTioAZBTBA=
+google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc=
+google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk=
+google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
+google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
+google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
+google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E=
+google.golang.org/api v0.193.0/go.mod h1:Po3YMV1XZx+mTku3cfJrlIYR03wiGrCOsdpC67hjZvw=
+google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
@@ -1028,15 +6471,341 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210427215850-f767ed18ee4d/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
+google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
+google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
+google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
+google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU=
+google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
+google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE=
+google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
+google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
+google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
+google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
+google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0=
+google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M=
+google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
+google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U=
+google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw=
+google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ=
+google.golang.org/genproto v0.0.0-20240604185151-ef581f913117/go.mod h1:lesfX/+9iA+3OdqeCpoDddJaNxVB1AB6tD7EfqMmprc=
+google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ=
+google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M=
+google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
+google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
+google.golang.org/genproto v0.0.0-20240725213756-90e476079158/go.mod h1:od+6rA98elHRdDlQTg6Lok9YQJ8hYumTbgVBUbM/YXw=
+google.golang.org/genproto v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Sk3mLpoDFTAp6R4OvlcUgaG4ISTspKeFsIAXMn9Bm4Y=
+google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M=
+google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4=
+google.golang.org/genproto v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:JB1IzdOfYpNW7QBoS3aYEw5Zl2Q3OEeNWY/Nb99hSyk=
+google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
+google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U=
+google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
+google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww=
+google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
+google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
+google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI=
+google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg=
+google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg=
+google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
+google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:PVreiBMirk8ypES6aw9d4p6iiBNSIfZEBqr3UGoAi2E=
+google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
+google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:K4kfzHtI0kqWA79gecJarFtDn/Mls+GxQcg3Zox91Ac=
+google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c=
+google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
+google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6/go.mod h1:10yRODfgim2/T8csjQsMPgZOMvtytXKTDRzH6HRGzRw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y=
+google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
+google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE=
+google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240722135656-d784300faade/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
+google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
+google.golang.org/genproto/googleapis/api v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:OFMYQFHJ4TM3JRlWDZhJbZfra2uqc3WLBZiaaqP4DtU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230711160842-782d3b101e98/go.mod h1:3QoBVwTHkXbY1oRGzlhwhOykfcATQN43LJ6iT8Wy8kE=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:+34luvCflYKiKylNwGJfn9cFBbcL/WrkciMmDmsTQ/A=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231212172506-995d672761c0/go.mod h1:guYXGPwC6jwxgWKW5Y405fKWOFNwlvUlUnzyp9i0uqo=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:ZSvZ8l+AWJwXw91DoTjWjaVLpWU6o0eZ4YLYpH8aLeQ=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:SCz6T5xjNXM4QFPRwxHcfChp7V+9DcXR3ay2TkHR8Tg=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240205150955-31a09d347014/go.mod h1:EhZbXt+eY4Yr3YVaEGLdNZF5viWowOJZ8KTPqjYMKzg=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:om8Bj876Z0v9ei+RD1LnEWig7vpHQ371PUqsgjmLQEA=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:vh/N7795ftP0AkN1w8XKqN4w1OdUKXW5Eummda+ofv8=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240311132316-a219d84964c2/go.mod h1:vh/N7795ftP0AkN1w8XKqN4w1OdUKXW5Eummda+ofv8=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240314234333-6e1732d8331c/go.mod h1:IN9OQUXZ0xT+26MDwZL8fJcYw+y99b0eYPA2U15Jt8o=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240318140521-94a12d6c2237/go.mod h1:IN9OQUXZ0xT+26MDwZL8fJcYw+y99b0eYPA2U15Jt8o=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:IN9OQUXZ0xT+26MDwZL8fJcYw+y99b0eYPA2U15Jt8o=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240429193739-8cf5692501f6/go.mod h1:ULqtoQMxDLNRfW+pJbKA68wtIy1OiYjdIsJs3PMpzh8=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240521202816-d264139d666e/go.mod h1:0J6mmn3XAEjfNbPvpH63c0RXCjGNFcCzlEfWSN4In+k=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240528184218-531527333157/go.mod h1:0J6mmn3XAEjfNbPvpH63c0RXCjGNFcCzlEfWSN4In+k=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240604185151-ef581f913117/go.mod h1:0J6mmn3XAEjfNbPvpH63c0RXCjGNFcCzlEfWSN4In+k=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240617180043-68d350f18fd4/go.mod h1:/oe3+SiHAwz6s+M25PyTygWm3lnrhmGqIuIfkoUocqk=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:/oe3+SiHAwz6s+M25PyTygWm3lnrhmGqIuIfkoUocqk=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240708141625-4ad9e859172b/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240722135656-d784300faade/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240814211410-ddb44dafa142/go.mod h1:gQizMG9jZ0L2ADJaM+JdZV4yTCON/CQpnHRPoM+54w4=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240823204242-4ba0660f739c/go.mod h1:gQizMG9jZ0L2ADJaM+JdZV4yTCON/CQpnHRPoM+54w4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231211222908-989df2bf70f3/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240122161410-6c6643bf1457/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240228201840-1f18d85a4ec2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415141817-7cd4c1c1f9ec/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
@@ -1044,9 +6813,72 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
+google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
+google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
+google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1057,83 +6889,310 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.1-0.20240319125436-3039476726e4/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
+gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg=
+gopkg.in/telebot.v3 v3.1.2/go.mod h1:GJKwwWqp9nSkIVN51eRKU78aB5f5OnQuWdwiIZfPbko=
+gopkg.in/telebot.v3 v3.1.3/go.mod h1:GJKwwWqp9nSkIVN51eRKU78aB5f5OnQuWdwiIZfPbko=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU=
+gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8=
+gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
+gorm.io/gorm v1.23.5/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/gotestsum v1.9.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
+gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
+gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
+k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
+k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg=
+k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg=
+k8s.io/api v0.28.6/go.mod h1:AM6Ys6g9MY3dl/XNaNfg/GePI0FT7WBGu8efU/lirAo=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
+k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0=
+k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
+k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
+k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
+k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA=
+k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
+k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
+k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
+k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48=
+k8s.io/client-go v0.28.6/go.mod h1:+nu0Yp21Oeo/cBCsprNVXB2BfJTV51lFfe5tXl2rUL8=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
+k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc=
+k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU=
+k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
+k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
+k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI=
+modernc.org/ccgo/v3 v3.0.0-20220910160915-348f15de615a/go.mod h1:8p47QxPkdugex9J4n9P2tLZ9bK01yngIVp00g4nomW0=
+modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
+modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
+modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
+modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
+modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.21.2/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw=
+modernc.org/libc v1.22.4/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0=
+modernc.org/sqlite v1.21.1/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI=
+modernc.org/sqlite v1.21.2/go.mod h1:cxbLkB5WS32DnQqeH4h4o1B0eMr8W/y8/RGuxQ3JsC0=
+modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0=
+modernc.org/tcl v1.15.1/go.mod h1:aEjeGJX2gz1oWKOLDVZ2tnEWLUrIn8H+GFu+akoDhqs=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
+modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ=
+nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
+oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc=
+oras.land/oras-go/v2 v2.3.1/go.mod h1:5AQXVEu1X/FKp1F9DMOb5ZItZBOa0y5dha0yCm4NR9c=
+pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s=
+sigs.k8s.io/release-utils v0.8.4/go.mod h1:m1bHfscTemQp+z+pLCZnkXih9n0+WukIUU70n6nFnU0=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto=
+tags.cncf.io/container-device-interface/specs-go v0.7.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80=
diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml
new file mode 100644
index 000000000..a8bc979e0
--- /dev/null
+++ b/vendor/dario.cat/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "dario.cat/mergo"
\ No newline at end of file
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
new file mode 100644
index 000000000..45ad0f1ae
--- /dev/null
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -0,0 +1,36 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Golang/Intellij
+.idea
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml
new file mode 100644
index 000000000..d324c43ba
--- /dev/null
+++ b/vendor/dario.cat/mergo/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch:
+ - amd64
+ - ppc64le
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - go test -race -v ./...
+after_script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
similarity index 100%
rename from vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
rename to vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md
new file mode 100644
index 000000000..0a1ff9f94
--- /dev/null
+++ b/vendor/dario.cat/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+  - Stack trace (Traceback)
+  - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+  - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+  - Possibly your input and the output
+  - Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security-related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE
similarity index 100%
rename from vendor/github.com/imdario/mergo/LICENSE
rename to vendor/dario.cat/mergo/LICENSE
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
new file mode 100644
index 000000000..0b3c48889
--- /dev/null
+++ b/vendor/dario.cat/mergo/README.md
@@ -0,0 +1,258 @@
+# Mergo
+
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
+
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
+
+### Important notes
+
+#### 1.0.0
+
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a transitive dependency pulls in Mergo (it isn't a direct dependency of your project), it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
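+
+For context, here is where that directive sits in the consuming module, as a minimal, hypothetical sketch (the module path and the dependency below are made up for illustration; only the `replace` line comes from above):
+
+```
+module example.com/myapp
+
+go 1.20
+
+require github.com/some/dependency v1.2.3 // hypothetical dependency that pulls in Mergo
+
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```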
+
+#### 0.3.9
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed the `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+
+
+
+### Mergo in the wild
+
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](https://github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
+
+## Install
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge while overwriting values by using the option `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+If you need to override pointers, so that the source pointer's value is assigned to the destination pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+ mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will simply be assigned as values, as the sketch below shows.
+
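+A small sketch of this behavior (the `Inner`/`Outer` types are made up for illustration); note also how each map key is derived from the field name by lower-casing its first letter:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Inner struct {
+	C string
+}
+
+type Outer struct {
+	A string
+	B Inner
+}
+
+func main() {
+	src := Outer{A: "one", B: Inner{C: "nested"}}
+	dst := map[string]interface{}{}
+	mergo.Map(&dst, src)
+	fmt.Printf("%#v\n", dst["b"])
+	// The nested struct is assigned as a struct value, not converted to a map:
+	// main.Inner{C:"nested"}
+}
+```
+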
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please run:
+
+ go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; comparing it against a zero value won't tell you whether it is empty, but its `IsZero` method will, because it checks each of its fields. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct{}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+[FOSSA license status](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
new file mode 100644
index 000000000..a5de61f77
--- /dev/null
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go
new file mode 100644
index 000000000..7d96ec054
--- /dev/null
+++ b/vendor/dario.cat/mergo/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it will recurse into any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+# Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+# Important notes
+
+1.0.0
+
+In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
+
+0.3.9
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10, which we consider stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+# Install
+
+Do your usual installation procedure:
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+# Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recurse into any exported one. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can overwrite existing non-empty values using the option WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will simply be assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+# Transformers
+
+Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; comparing it against a zero value won't tell you whether it is empty, but its IsZero method will, because it checks each of its fields. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+ )
+
+	type timeTransformer struct{}
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+# Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+# About
+
+Written by Dario Castañé: https://da.rio.hn
+
+# License
+
+BSD 3-Clause license, as Go language.
+*/
+package mergo
diff --git a/vendor/dario.cat/mergo/go.mod b/vendor/dario.cat/mergo/go.mod
new file mode 100644
index 000000000..21f2faa8c
--- /dev/null
+++ b/vendor/dario.cat/mergo/go.mod
@@ -0,0 +1,5 @@
+module dario.cat/mergo
+
+go 1.13
+
+require gopkg.in/yaml.v3 v3.0.1
diff --git a/vendor/dario.cat/mergo/go.sum b/vendor/dario.cat/mergo/go.sum
new file mode 100644
index 000000000..a62c313c5
--- /dev/null
+++ b/vendor/dario.cat/mergo/go.sum
@@ -0,0 +1,4 @@
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
new file mode 100644
index 000000000..759b4f74f
--- /dev/null
+++ b/vendor/dario.cat/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from the official Go stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{typ, seen, addr}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ config.overwriteWithEmptyValue = true
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+				// If dst is a pointer but src is not, take src's address (when possible) so their kinds match.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
new file mode 100644
index 000000000..fd47c95b2
--- /dev/null
+++ b/vendor/dario.cat/mergo/merge.go
@@ -0,0 +1,409 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from the official Go stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
+// Config collects the options that control how Merge and Map behave. It is
+// filled in by the functional options (WithOverride, WithAppendSlice, etc.)
+// passed to Merge and Map.
+type Config struct {
+	Transformers                 Transformers
+	Overwrite                    bool
+	ShouldNotDereference         bool
+	AppendSlice                  bool
+	TypeCheck                    bool
+	overwriteWithEmptyValue      bool
+	overwriteSliceWithEmptyValue bool
+	sliceDeepCopy                bool
+	debug                        bool
+}
+
+// Transformers supplies a custom merge function per type; Transformer returns
+// nil for types that should use the default behavior. See WithTransformers.
+type Transformers interface {
+	Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{typ, seen, addr}
+ }
+
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasMergeableFields(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ if dst.CanSet() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ } else {
+ dst = src
+ return
+ }
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite && dst.CanSet() {
+ dst.Set(src)
+ }
+ return
+ }
+
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+
+ if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+ continue
+ }
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
+ }
+
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+
+ // Ensure that all keys in dst are deleted if they are not in src.
+ if overwriteWithEmptySrc {
+ for _, key := range dst.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ dst.SetMapIndex(key, reflect.Value{})
+ }
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+			return fmt.Errorf("cannot append two slices with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if !config.ShouldNotDereference {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if src.Elem().Kind() != reflect.Struct {
+ if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+ dst.Set(src)
+ }
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
+ }
+ default:
+ mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
+ }
+ }
+
+ return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using the
+// corresponding src attributes, if those are not empty themselves. dst and src
+// must be valid same-type structs and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize how some types are merged.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override an empty dst slice with an empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+ config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting them (it must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice elements one by one, setting the Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// isReflectNil reports whether the provided reflect value is nil.
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go
similarity index 85%
rename from vendor/github.com/imdario/mergo/mergo.go
rename to vendor/dario.cat/mergo/mergo.go
index 3cc926c7f..0a721e2d8 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/dario.cat/mergo/mergo.go
@@ -17,10 +17,10 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
- ErrNonPointerAgument = errors.New("dst must be a pointer")
+ ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@@ -28,13 +28,13 @@ var (
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
- ptr uintptr
typ reflect.Type
next *visit
+ ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
@@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() {
return true
}
- return isEmptyValue(v.Elem())
+ if shouldDereference {
+ return isEmptyValue(v.Elem(), shouldDereference)
+ }
+ return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
@@ -65,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
index 0cd380037..fe79e3add 100644
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -1,5 +1,2 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
+/toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0..000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 6efcfd0ce..000000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848d3..000000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 7c1b37ecc..639e6c399 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -1,46 +1,26 @@
-## TOML parser and encoder for Go with reflection
-
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
+reflection interface similar to Go's standard library `json` and `xml` packages.
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Documentation: https://godoc.org/github.com/BurntSushi/toml
+Documentation: https://godocs.io/github.com/BurntSushi/toml
-Installation:
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
-```bash
-go get github.com/BurntSushi/toml
-```
+This library requires Go 1.18 or newer; add it to your go.mod with:
-Try the toml validator:
+ % go get github.com/BurntSushi/toml@latest
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
+It also comes with a TOML validator CLI tool:
-[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
+ % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+ % tomlv some-toml-file.toml
### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
+For the simplest example, consider some TOML file as just a list of keys and
+values:
```toml
Age = 25
@@ -50,29 +30,23 @@ Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
-Which could be defined in Go as:
+Which can be decoded with:
```go
type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time
}
-```
-And then decoded with:
-
-```go
var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
+_, err := toml.Decode(tomlData, &conf)
```
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
+You can also use struct tags if your struct field name doesn't map to a TOML key
+value directly:
```toml
some_key_NAME = "wat"
@@ -80,139 +54,67 @@ some_key_NAME = "wat"
```go
type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
+ ObscureKey string `toml:"some_key_NAME"`
}
```
-### Using the `encoding.TextUnmarshaler` interface
+Beware that, like other decoders, **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses values in a `mail.Address`:
```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
+contacts = [
+    "Donald Duck <donald@duckburg.com>",
+    "Scrooge McDuck <scrooge@duckburg.com>",
+]
```
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
+Can be decoded with:
```go
-type duration struct {
- time.Duration
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+ *mail.Address
}
-func (d *duration) UnmarshalText(text []byte) error {
+func (a *address) UnmarshalText(text []byte) error {
var err error
- d.Duration, err = time.ParseDuration(string(text))
+ a.Address, err = mail.ParseAddress(string(text))
return err
}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
+// Decode it.
+func decode() {
+ blob := `
+ contacts = [
+			"Donald Duck <donald@duckburg.com>",
+			"Scrooge McDuck <scrooge@duckburg.com>",
+ ]
+ `
+
+ var contacts struct {
+ Contacts []address
+ }
+
+ _, err := toml.Decode(blob, &contacts)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for _, c := range contacts.Contacts {
+ fmt.Printf("%#v\n", c.Address)
+ }
+
+ // Output:
+ // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+ // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
}
```
-Note that a case insensitive match will be tried if an exact match can't be
-found.
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
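+
+For instance, a minimal sketch (imports omitted, as in the other snippets; the
+`config` type and its keys are made up for illustration). The value handed to
+`UnmarshalTOML` is the raw parsed data, such as a `map[string]any` for a table,
+with integers arriving as `int64`:
+
+```go
+type config struct {
+	host string
+	port int64
+}
+
+func (c *config) UnmarshalTOML(data any) error {
+	m, ok := data.(map[string]any)
+	if !ok {
+		return fmt.Errorf("expected a TOML table, got %T", data)
+	}
+	if h, ok := m["host"].(string); ok {
+		c.host = h
+	}
+	if p, ok := m["port"].(int64); ok {
+		c.port = p
+	}
+	return nil
+}
+```
+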
-A working example of the above can be found in `_examples/example.{go,toml}`.
+### More complex usage
+See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index b0fd51d5b..7aaf462c9 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,146 +1,199 @@
package toml
import (
+ "bytes"
+ "encoding"
+ "encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math"
+ "os"
"reflect"
+ "strconv"
"strings"
"time"
)
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
+ UnmarshalTOML(any) error
}
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
+// Unmarshal decodes the contents of data in TOML format into a pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Unmarshal(data []byte, v any) error {
+ _, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
+// Decode the TOML data into the pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Decode(data string, v any) (MetaData, error) {
+ return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile reads the contents of a file and decodes it with [Decode].
+func DecodeFile(path string, v any) (MetaData, error) {
+ fp, err := os.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
+
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
+
// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use [PrimitiveDecode] to "manually" decode these values.
//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
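+//
+// As a small sketch (the names here are illustrative), decoding can be delayed
+// until the concrete type is known at runtime:
+//
+//	var raw struct{ Value Primitive }
+//	md, _ := Decode(`value = [1, 2, 3]`, &raw)
+//	var ints []int
+//	md.PrimitiveDecode(raw.Value, &ints) // ints == []int{1, 2, 3}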
type Primitive struct {
- undecoded interface{}
+ undecoded any
context Key
}
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
+// The significand precision for float32 and float64 is 24 and 53 bits; this is
+// the range a natural number can be stored in a float without loss of data.
+const (
+ maxSafeFloat32Int = 16777215 // 2^24-1
+ maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
+)
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
+// Decoder decodes TOML data.
//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
+// TOML tables correspond to Go structs or maps; they can be used
+// interchangeably, but structs offer better type safety.
//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
+// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
+// local timezone.
//
-// TOML datetimes correspond to Go `time.Time` values.
+// [time.Duration] types are treated as nanoseconds if the TOML value is an
+// integer, or they're parsed with time.ParseDuration() if they're strings.
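+//
+// For example, a sketch (the struct and key are illustrative):
+//
+//	type cfg struct{ Timeout time.Duration }
+//	var c cfg
+//	Decode(`timeout = "1m30s"`, &c) // c.Timeout is 90s, via time.ParseDuration
+//	Decode(`timeout = 1500`, &c)    // c.Timeout is 1500ns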
//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// email addresses.
//
-// Key mapping
+// # Key mapping
//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+ r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+var (
+ unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
+)
+
+// Decode TOML data into the pointer `v`.
+func (dec *Decoder) Decode(v any) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ s := "%q"
+ if reflect.TypeOf(v) == nil {
+ s = "%v"
+ }
+
+ return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
- }
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
+ return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
+
+ // Check if this is a supported type: struct, map, any, or something that
+ // implements UnmarshalTOML or UnmarshalText.
+ rv = indirect(rv)
+ rt := rv.Type()
+ if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
+ !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
+ !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
+ return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
}
- return md, md.unify(p.mapping, indirect(rv))
-}
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
+ // TODO: parser should read from io.Reader? Or at the very least, make it
+ // read from []byte rather than string
+ data, err := io.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
- return Decode(string(bs), v)
-}
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
+ p, err := parse(string(data))
if err != nil {
return MetaData{}, err
}
- return Decode(string(bs), v)
+
+ md := MetaData{
+ mapping: p.mapping,
+ keyInfo: p.keyInfo,
+ keys: p.ordered,
+ decoded: make(map[string]struct{}, len(p.ordered)),
+ context: nil,
+ data: data,
+ }
+ return md, md.unify(p.mapping, rv)
+}
+
+// PrimitiveDecode is just like the other Decode* functions, except it decodes a
+// TOML value that has already been parsed. Valid primitive values can *only* be
+// obtained from values filled by the decoder functions, including this method.
+// (i.e., v may contain more [Primitive] values.)
+//
+// Meta data for primitive values is included in the meta data returned by the
+// Decode* functions with one exception: keys returned by the Undecoded method
+// will only reflect keys that were decoded. Namely, any keys hidden behind a
+// Primitive will be considered undecoded. Executing this method will update the
+// undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
}
// unify performs a sort of type unification based on the structure of `rv`,
@@ -148,10 +201,10 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
+func (md *MetaData) unify(data any, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // TODO: #76 would make this superfluous after implemented.
+ if rv.Type() == primitiveType {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
@@ -163,48 +216,32 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return nil
}
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
+ rvi := rv.Interface()
+ if v, ok := rvi.(Unmarshaler); ok {
+ err := v.UnmarshalTOML(data)
+ if err != nil {
+ return md.parseErr(err)
}
+ return nil
}
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
- // BUG(burntsushi)
+
+ // TODO:
// The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+ // array. In particular, the unmarshaler should only be applied to primitive
+ // TOML values. But at this point, it will be applied to all kinds of values
+ // and produce an incorrect error whenever those values are hashes or arrays
+ // (including arrays of tables).
k := rv.Kind()
- // laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
@@ -218,27 +255,23 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
+		if rv.NumMethod() > 0 { // Only empty interfaces are supported.
+ return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
+ case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
- return e("unsupported type %s", rv.Kind())
+ return md.e("unsupported type %s", rv.Kind())
}
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]any)
if !ok {
if mapping == nil {
return nil
}
- return e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
+ return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
}
for key, datum := range tmap {
@@ -259,74 +292,88 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
+
if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
+ md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
+
+ err := md.unify(datum, subv)
+ if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
+ return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
return nil
}
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
+ keyType := rv.Type().Key().Kind()
+ if keyType != reflect.String && keyType != reflect.Interface {
+ return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
+ keyType, rv.Type())
+ }
+
+ tmap, ok := mapping.(map[string]any)
if !ok {
if tmap == nil {
return nil
}
- return badtype("map", mapping)
+ return md.badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
+ md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k)
- rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
+
+ err := md.unify(v, indirect(rvval))
+ if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
- rvkey.SetString(k)
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+
+ switch keyType {
+ case reflect.Interface:
+ rvkey.Set(reflect.ValueOf(k))
+ case reflect.String:
+ rvkey.SetString(k)
+ }
+
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
- return badtype("slice", data)
+ return md.badtype("slice", data)
}
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
+ if l := datav.Len(); l != rv.Len() {
+ return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
- return badtype("slice", data)
+ return md.badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
@@ -337,37 +384,45 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
+ l := data.Len()
+ for i := 0; i < l; i++ {
+ err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+ if err != nil {
return err
}
}
return nil
}
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
+func (md *MetaData) unifyString(data any, rv reflect.Value) error {
+ _, ok := rv.Interface().(json.Number)
+ if ok {
+ if i, ok := data.(int64); ok {
+ rv.SetString(strconv.FormatInt(i, 10))
+ } else if f, ok := data.(float64); ok {
+ rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
+ } else {
+ return md.badtype("string", data)
+ }
return nil
}
- return badtype("time.Time", data)
-}
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
- return badtype("string", data)
+ return md.badtype("string", data)
}
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
+ rvk := rv.Kind()
+
if num, ok := data.(float64); ok {
- switch rv.Kind() {
+ switch rvk {
case reflect.Float32:
+ if num < -math.MaxFloat32 || num > math.MaxFloat32 {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
fallthrough
case reflect.Float64:
rv.SetFloat(num)
@@ -376,73 +431,85 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}
return nil
}
- return badtype("float", data)
-}
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
+ if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
+ (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
+ return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
}
+ rv.SetFloat(float64(num))
return nil
}
- return badtype("integer", data)
+
+ return md.badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
+ _, ok := rv.Interface().(time.Duration)
+ if ok {
+ // Parse as string duration, and fall back to regular integer parsing
+ // (as nanosecond) if this is not a string.
+ if s, ok := data.(string); ok {
+ dur, err := time.ParseDuration(s)
+ if err != nil {
+ return md.parseErr(errParseDuration{s})
+ }
+ rv.SetInt(int64(dur))
+ return nil
+ }
+ }
+
+ num, ok := data.(int64)
+ if !ok {
+ return md.badtype("integer", data)
+ }
+
+ rvk := rv.Kind()
+ switch {
+ case rvk >= reflect.Int && rvk <= reflect.Int64:
+ if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
+ (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
+ (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
+ rv.SetInt(num)
+ case rvk >= reflect.Uint && rvk <= reflect.Uint64:
+ unum := uint64(num)
+ if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
+ rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
+ rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
+ rv.SetUint(unum)
+ default:
+ panic("unreachable")
+ }
+ return nil
}
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
- return badtype("boolean", data)
+ return md.badtype("boolean", data)
}
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
- case TextMarshaler:
+ case Marshaler:
+ text, err := sdata.MarshalTOML()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case encoding.TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@@ -459,30 +526,62 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
case float64:
s = fmt.Sprintf("%f", sdata)
default:
- return badtype("primitive (string-like)", data)
+ return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
+ return md.parseErr(err)
}
return nil
}
+func (md *MetaData) badtype(dst string, data any) error {
+ return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
+}
+
+func (md *MetaData) parseErr(err error) error {
+ k := md.context.String()
+ return ParseError{
+ LastKey: k,
+ Position: md.keyInfo[k].pos,
+ Line: md.keyInfo[k].pos.Line,
+ err: err,
+ input: string(md.data),
+ }
+}
+
+func (md *MetaData) e(format string, args ...any) error {
+ f := "toml: "
+ if len(md.context) > 0 {
+ f = fmt.Sprintf("toml: (last key %q): ", md.context)
+ p := md.keyInfo[md.context.String()].pos
+ if p.Line > 0 {
+ f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
+ }
+ }
+ return fmt.Errorf(f+format, args...)
+}
+
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
+func rvalue(v any) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
+// Pointers are followed until the value is not a pointer. New values are
+// allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of interest
+// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ pvi := pv.Interface()
+ if _, ok := pvi.(encoding.TextUnmarshaler); ok {
+ return pv
+ }
+ if _, ok := pvi.(Unmarshaler); ok {
return pv
}
}
@@ -498,12 +597,17 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ rvi := rv.Interface()
+ if _, ok := rvi.(encoding.TextUnmarshaler); ok {
+ return true
+ }
+ if _, ok := rvi.(Unmarshaler); ok {
return true
}
return false
}
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
+// fmtType formats %T with "interface {}" replaced by "any", which is far more readable.
+func fmtType(t any) string {
+ return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
}
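
Reviewer note: the decode.go hunks above add two conveniences worth calling out: TOML strings can now populate time.Duration fields (parsed with time.ParseDuration), and TOML integers or floats can populate json.Number fields. A minimal sketch of that behavior against the public API, using a hypothetical Config type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

// Config is a hypothetical type for illustration; only the field types matter.
type Config struct {
	Timeout time.Duration `toml:"timeout"` // "5m10s" goes through time.ParseDuration
	Port    json.Number   `toml:"port"`    // filled from TOML integers or floats
}

func main() {
	var c Config
	if _, err := toml.Decode("timeout = \"5m10s\"\nport = 8080", &c); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(c.Timeout, c.Port) // 5m10s 8080
}
```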
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index b9914a679..000000000
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
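
The MetaData documentation deleted here is not lost; upstream appears to have moved it (to meta.go), and the API is unchanged. Undecoded remains the hook callers use to surface keys the target struct doesn't know about, roughly like this:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var server struct {
		Host string
		Port int
	}
	// "debug" has no matching field on the struct.
	doc := "host = \"localhost\"\nport = 8080\ndebug = true"

	md, err := toml.Decode(doc, &server)
	if err != nil {
		panic(err)
	}
	// Undecoded lists keys present in the TOML but absent from the target.
	for _, key := range md.Undecoded() {
		fmt.Println("unknown key:", key) // unknown key: debug
	}
}
```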
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 000000000..155709a80
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,29 @@
+package toml
+
+import (
+ "encoding"
+ "io"
+)
+
+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
+
+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v any) error {
+ md := MetaData{decoded: make(map[string]struct{})}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
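
These aliases keep existing call sites compiling; migration is mechanical. A before/after sketch:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Name string }

	// Deprecated spelling, still compiling thanks to the alias above:
	//	md, err := toml.DecodeReader(r, &cfg)
	// Preferred spelling:
	md, err := toml.NewDecoder(strings.NewReader(`name = "demo"`)).Decode(&cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Name, md.Keys()) // demo [name]
}
```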
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index b371f396e..82c90a905 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -1,27 +1,8 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
+// Package toml implements decoding and encoding of TOML files.
+//
+// This package supports TOML v1.0.0, as specified at https://toml.io
+//
+// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+// and can be used to verify whether a TOML document is valid. It can also be
+// used to print the type of each key.
package toml
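
For reference, the core entry point the trimmed package doc alludes to mirrors the stdlib encoding packages; a minimal decode, as a sketch:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Age  int
		Cats []string
	}
	blob := "age = 25\ncats = [\"Cauchy\", \"Plato\"]"
	if err := toml.Unmarshal([]byte(blob), &conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.Age, conf.Cats) // 25 [Cauchy Plato]
}
```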
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index d905c21a2..73366c0d9 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,90 +2,149 @@ package toml
import (
"bufio"
+ "bytes"
+ "encoding"
+ "encoding/json"
"errors"
"fmt"
"io"
+ "math"
"reflect"
"sort"
"strconv"
"strings"
"time"
+
+ "github.com/BurntSushi/toml/internal"
)
type tomlEncodeError struct{ error }
var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
+ errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+ errNonString = errors.New("toml: cannot encode a map with non-string key type")
+ errNoKey = errors.New("toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
)
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
+var dblQuotedReplacer = strings.NewReplacer(
"\"", "\\\"",
"\\", "\\\\",
+ "\x00", `\u0000`,
+ "\x01", `\u0001`,
+ "\x02", `\u0002`,
+ "\x03", `\u0003`,
+ "\x04", `\u0004`,
+ "\x05", `\u0005`,
+ "\x06", `\u0006`,
+ "\x07", `\u0007`,
+ "\b", `\b`,
+ "\t", `\t`,
+ "\n", `\n`,
+ "\x0b", `\u000b`,
+ "\f", `\f`,
+ "\r", `\r`,
+ "\x0e", `\u000e`,
+ "\x0f", `\u000f`,
+ "\x10", `\u0010`,
+ "\x11", `\u0011`,
+ "\x12", `\u0012`,
+ "\x13", `\u0013`,
+ "\x14", `\u0014`,
+ "\x15", `\u0015`,
+ "\x16", `\u0016`,
+ "\x17", `\u0017`,
+ "\x18", `\u0018`,
+ "\x19", `\u0019`,
+ "\x1a", `\u001a`,
+ "\x1b", `\u001b`,
+ "\x1c", `\u001c`,
+ "\x1d", `\u001d`,
+ "\x1e", `\u001e`,
+ "\x1f", `\u001f`,
+ "\x7f", `\u007f`,
)
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
+var (
+ marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
}
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
+// Marshal returns a TOML representation of the Go value.
+//
+// See [Encoder] for a description of the encoding process.
+func Marshal(v any) ([]byte, error) {
+ buff := new(bytes.Buffer)
+ if err := NewEncoder(buff).Encode(v); err != nil {
+ return nil, err
}
+ return buff.Bytes(), nil
}
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for [Decode].
+//
+// time.Time is encoded as an RFC 3339 string, and time.Duration as its string
+// representation.
//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
+// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
+// encode the value as custom TOML.
//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// The toml struct tag can be used to provide the key name; if omitted the
+// struct field name will be used. If the "omitempty" option is present the
+// following values will be skipped:
+//
+// - arrays, slices, maps, and strings with len of 0
+// - structs with all zero values
+// - bool false
+//
+// If the "omitzero" option is given, all int and float types with a value of
+// 0 will be skipped.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+ Indent string // string for a single indentation level; default is two spaces.
+ hasWritten bool // written any output to w yet?
+ w *bufio.Writer
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: bufio.NewWriter(w), Indent: " "}
+}
+
+// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v any) error {
rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ err := enc.safeEncode(Key([]string{}), rv)
+ if err != nil {
return err
}
return enc.w.Flush()
@@ -106,13 +165,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
+ // If we can marshal the type to text, then we use that. This prevents the
+ // encoder for handling these types as generic structs (or whatever the
+ // underlying type of a TextMarshaler is).
+ switch {
+ case isMarshaler(rv):
+ enc.writeKeyValue(key, rv, false)
+ return
+ case rv.Type() == primitiveType: // TODO: #76 would make this superfluous once implemented.
+ enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
return
}
@@ -123,12 +184,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
}
case reflect.Interface:
if rv.IsNil() {
@@ -148,55 +209,126 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
- panic(e("unsupported type for key '%s': %s", key, k))
+ encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
}
}
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
+// eElement encodes any value that can be an array element.
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+ format := time.RFC3339Nano
+ switch v.Location() {
+ case internal.LocalDatetime:
+ format = "2006-01-02T15:04:05.999999999"
+ case internal.LocalDate:
+ format = "2006-01-02"
+ case internal.LocalTime:
+ format = "15:04:05.999999999"
+ }
+ switch v.Location() {
+ default:
+ enc.wf(v.Format(format))
+ case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+ enc.wf(v.In(time.UTC).Format(format))
+ }
return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
+ case Marshaler:
+ s, err := v.MarshalTOML()
+ if err != nil {
encPanic(err)
- } else {
- enc.writeQuoted(string(s))
}
+ if s == nil {
+ encPanic(errors.New("MarshalTOML returned nil and no error"))
+ }
+ enc.w.Write(s)
return
+ case encoding.TextMarshaler:
+ s, err := v.MarshalText()
+ if err != nil {
+ encPanic(err)
+ }
+ if s == nil {
+ encPanic(errors.New("MarshalText returned nil and no error"))
+ }
+ enc.writeQuoted(string(s))
+ return
+ case time.Duration:
+ enc.writeQuoted(v.String())
+ return
+ case json.Number:
+ n, _ := rv.Interface().(json.Number)
+
+ if n == "" { /// Useful zero value.
+ enc.w.WriteByte('0')
+ return
+ } else if v, err := n.Int64(); err == nil {
+ enc.eElement(reflect.ValueOf(v))
+ return
+ } else if v, err := n.Float64(); err == nil {
+ enc.eElement(reflect.ValueOf(v))
+ return
+ }
+ encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
}
+
switch rv.Kind() {
+ case reflect.Ptr:
+ enc.eElement(rv.Elem())
+ return
+ case reflect.String:
+ enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+ }
case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+ }
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
+ case reflect.Struct:
+ enc.eStruct(nil, rv, true)
+ case reflect.Map:
+ enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
+ encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
}
}
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
@@ -205,14 +337,14 @@ func floatAddDecimal(fstr string) string {
}
func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
+ enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
- elem := rv.Index(i)
+ elem := eindirect(rv.Index(i))
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
@@ -226,44 +358,43 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
+ trv := eindirect(rv.Index(i))
if isNil(trv) {
continue
}
- panicIfInvalidKey(key)
enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
- enc.eMapOrStruct(key, trv)
+ enc.eMapOrStruct(key, trv, false)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.wf("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
- enc.eMapOrStruct(key, rv)
+ enc.eMapOrStruct(key, rv, false)
}
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
+ switch rv.Kind() {
case reflect.Map:
- enc.eMap(key, rv)
+ enc.eMap(key, rv, inline)
case reflect.Struct:
- enc.eStruct(key, rv)
+ enc.eStruct(key, rv, inline)
default:
+ // Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
@@ -274,68 +405,100 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
- var writeMapKeys = func(mapKeys []string) {
+ var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
+ for i, mapKey := range mapKeys {
+ val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+ if isNil(val) {
continue
}
- enc.encode(key.add(mapKey), mrv)
+
+ if inline {
+ enc.writeKeyValue(Key{mapKey}, val, true)
+ if trailC || i != len(mapKeys)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(mapKey), val)
+ }
}
}
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
+
+ if inline {
+ enc.wf("{")
+ }
+ writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+ writeMapKeys(mapKeysSub, false)
+ if inline {
+ enc.wf("}")
+ }
}
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+const is32Bit = (32 << (^uint(0) >> 63)) == 32
+
+func pointerTo(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ return pointerTo(t.Elem())
+ }
+ return t
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
+ // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ //
+ // Fields is a [][]int: for fieldsDirect this always has one entry (the
+ // struct index). For fieldsSub it contains two entries: the parent field
+ // index from rv, and the field indexes for the fields of the sub.
+ var (
+ rt = rv.Type()
+ fieldsDirect, fieldsSub [][]int
+ addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ )
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
+ isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
+ if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
+ continue
+ }
+ opts := getOptions(f.Tag)
+ if opts.skip {
continue
}
- frv := rv.Field(i)
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
- }
- continue
- }
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
+
+ frv := eindirect(rv.Field(i))
+
+ if is32Bit {
+ // Copy so it works correctly on 32bit archs; not clear why this
+ // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+ // This also works fine on 64bit, but 32bit archs are somewhat
+ // rare and this is a wee bit faster.
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ start = copyStart
+ }
+
+ // Treat anonymous struct fields with tag names as though they are
+ // not anonymous, like encoding/json does.
+ //
+ // Non-struct anonymous fields use the normal encoding logic.
+ if isEmbed {
+ if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
+ addFields(frv.Type(), frv, append(start, f.Index...))
+ continue
}
}
- if typeIsHash(tomlTypeOfGo(frv)) {
+ if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
@@ -344,48 +507,81 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
}
addFields(rt, rv, nil)
- var writeFields = func(fields [][]int) {
+ writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
+ fieldType := rt.FieldByIndex(fieldIndex)
+ fieldVal := rv.FieldByIndex(fieldIndex)
+
+ opts := getOptions(fieldType.Tag)
+ if opts.skip {
+ continue
+ }
+ if opts.omitempty && isEmpty(fieldVal) {
continue
}
- opts := getOptions(sft.Tag)
- if opts.skip {
+ fieldVal = eindirect(fieldVal)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
}
- keyName := sft.Name
+
+ keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && isEmpty(sf) {
- continue
- }
- if opts.omitzero && isZero(sf) {
+
+ if opts.omitzero && isZero(fieldVal) {
continue
}
- enc.encode(key.add(keyName), sf)
+ if inline {
+ enc.writeKeyValue(Key{keyName}, fieldVal, true)
+ if fieldIndex[0] != len(fields)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(keyName), fieldVal)
+ }
}
}
+
+ if inline {
+ enc.wf("{")
+ }
writeFields(fieldsDirect)
writeFields(fieldsSub)
+ if inline {
+ enc.wf("}")
+ }
}
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
+// tomlTypeOfGo returns the TOML type name of the Go value's type.
+//
+// It is used to determine whether the types of array elements are mixed (which
+// is forbidden). If the Go value is nil, then it is illegal for it to be an
+// array element, and valueIsNil is returned as true.
+//
+// The type may be `nil`, which means no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
+
+ if rv.Kind() == reflect.Struct {
+ if rv.Type() == timeType {
+ return tomlDatetime
+ }
+ if isMarshaler(rv) {
+ return tomlString
+ }
+ return tomlHash
+ }
+
+ if isMarshaler(rv) {
+ return tomlString
+ }
+
switch rv.Kind() {
case reflect.Bool:
return tomlBool
@@ -397,7 +593,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
+ if isTableArray(rv) {
return tomlArrayHash
}
return tomlArray
@@ -407,54 +603,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
return tomlString
case reflect.Map:
return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
+ encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+ panic("unreachable")
}
}
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
+func isMarshaler(rv reflect.Value) bool {
+ return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
+}
+
+// isTableArray reports if all entries in the array or slice are a table.
+func isTableArray(arr reflect.Value) bool {
+ if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
+ return false
}
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
+ ret := true
+ for i := 0; i < arr.Len(); i++ {
+ tt := tomlTypeOfGo(eindirect(arr.Index(i)))
+ // Don't allow nil.
+ if tt == nil {
encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
}
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
+
+ if ret && !typeEqual(tomlHash, tt) {
+ ret = false
}
}
- return firstType
+ return ret
}
type tagOptions struct {
@@ -499,8 +676,26 @@ func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
+ case reflect.Struct:
+ if rv.Type().Comparable() {
+ return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+ }
+ // Need to also check if all the fields are empty, otherwise something
+ // like this with uncomparable types will always return true:
+ //
+ // type a struct{ field b }
+ // type b struct{ s []string }
+ // s := a{field: b{s: []string{"AAA"}}}
+ for i := 0; i < rv.NumField(); i++ {
+ if !isEmpty(rv.Field(i)) {
+ return false
+ }
+ }
+ return true
case reflect.Bool:
return !rv.Bool()
+ case reflect.Ptr:
+ return rv.IsNil()
}
return false
}
@@ -511,18 +706,34 @@ func (enc *Encoder) newline() {
}
}
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+// Write a key/value pair:
+//
+// key = <any value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+// ┌───────────────────┐
+// │      ┌───┐  ┌────┐│
+// v      v   v  v    vv
+// key = {k = 1, k2 = 2}
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+ /// Marshaler used on top-level document; call eElement() to just call
+ /// Marshal{TOML,Text}.
if len(key) == 0 {
- encPanic(errNoKey)
+ enc.eElement(val)
+ return
}
- panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
- enc.newline()
+ if !inline {
+ enc.newline()
+ }
}
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+func (enc *Encoder) wf(format string, v ...any) {
+ _, err := fmt.Fprintf(enc.w, format, v...)
+ if err != nil {
encPanic(err)
}
enc.hasWritten = true
@@ -536,13 +747,25 @@ func encPanic(err error) {
panic(tomlEncodeError{err})
}
+// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
+ if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
+ if isMarshaler(v) {
+ return v
+ }
+ if v.CanAddr() { /// Special case for marshalers; see #358.
+ if pv := v.Addr(); isMarshaler(pv) {
+ return pv
+ }
+ }
+ return v
+ }
+
+ if v.IsNil() {
return v
}
+
+ return eindirect(v.Elem())
}
func isNil(rv reflect.Value) bool {
@@ -553,16 +776,3 @@ func isNil(rv reflect.Value) bool {
return false
}
}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
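
The rewritten encoder honors the new Marshaler interface alongside the omitempty/omitzero tag options documented above. A minimal sketch; Level is a hypothetical type, and note MarshalTOML must emit valid TOML (quotes included):

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// Level is a hypothetical enum that marshals itself as a TOML string.
// MarshalTOML must return valid TOML, so the quotes are part of the output.
type Level int

func (l Level) MarshalTOML() ([]byte, error) {
	if l == 0 {
		return []byte(`"debug"`), nil
	}
	return []byte(`"info"`), nil
}

type Config struct {
	Name  string   `toml:"name"`
	Tags  []string `toml:"tags,omitempty"` // skipped: len 0
	Retry int      `toml:"retry,omitzero"` // skipped: value 0
	Level Level    `toml:"level"`
}

func main() {
	// Prints:
	//   name = "demo"
	//   level = "info"
	err := toml.NewEncoder(os.Stdout).Encode(Config{Name: "demo", Level: 1})
	if err != nil {
		panic(err)
	}
}
```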
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd60..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d04..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
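
With the Go 1.1 shims deleted, the encoder consumes encoding.TextMarshaler directly, so stdlib types that implement it encode as quoted strings without adapter types. For instance:

```go
package main

import (
	"net"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	// net.IP implements encoding.TextMarshaler, so it is written as a
	// quoted TOML string rather than as an array of bytes.
	cfg := struct {
		Addr net.IP `toml:"addr"`
	}{Addr: net.ParseIP("10.0.0.1")}

	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
	// Output: addr = "10.0.0.1"
}
```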
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
new file mode 100644
index 000000000..b45a3f45f
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -0,0 +1,356 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax such as
+// invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using [ErrorWithPosition]:
+//
+// toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+// At line 4, column 2-7:
+//
+// 2 | fruit = []
+// 3 |
+// 4 | [[fruit]] # Not allowed
+// ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+// toml: error: newlines not allowed within inline tables
+//
+// At line 1, column 18:
+//
+// 1 | x = [{ key = 42 #
+// ^
+//
+// Error help:
+//
+// Inline tables must always be on a single line:
+//
+// table = {key = 42, second = 43}
+//
+// It is invalid to split them over multiple lines like so:
+//
+// # INVALID
+// table = {
+// key = 42,
+// second = 43
+// }
+//
+// Use regular tables for this:
+//
+// [table]
+// key = 42
+// second = 43
+type ParseError struct {
+ Message string // Short technical message.
+ Usage string // Longer message with usage guidance; may be blank.
+ Position Position // Position of the error
+ LastKey string // Last parsed key, may be blank.
+
+ // Line the error occurred.
+ //
+ // Deprecated: use [Position].
+ Line int
+
+ err error
+ input string
+}
+
+// Position of an error.
+type Position struct {
+ Line int // Line number, starting at 1.
+ Start int // Start of error, as byte offset starting at 0.
+ Len int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+ msg := pe.Message
+ if msg == "" { // Error from errorf()
+ msg = pe.err.Error()
+ }
+
+ if pe.LastKey == "" {
+ return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+ }
+ return fmt.Sprintf("toml: line %d (last key %q): %s",
+ pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+ if pe.input == "" { // Should never happen, but just in case.
+ return pe.Error()
+ }
+
+ var (
+ lines = strings.Split(pe.input, "\n")
+ col = pe.column(lines)
+ b = new(strings.Builder)
+ )
+
+ msg := pe.Message
+ if msg == "" {
+ msg = pe.err.Error()
+ }
+
+ // TODO: don't show control characters as literals? This may not show up
+ // well everywhere.
+
+ if pe.Position.Len == 1 {
+ fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+ msg, pe.Position.Line, col+1)
+ } else {
+ fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+ msg, pe.Position.Line, col, col+pe.Position.Len)
+ }
+ if pe.Position.Line > 2 {
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
+ }
+ if pe.Position.Line > 1 {
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
+ }
+
+ /// Expand tabs, so that the ^^^s are at the correct position, but leave
+ /// "column 10-13" intact. Adjusting this to the visual column would be
+ /// better, but we don't know the tabsize of the user in their editor, which
+ /// can be 8, 4, 2, or something else. We can't know. So leaving it as the
+ /// character index is probably the "most correct".
+ expanded := expandTab(lines[pe.Position.Line-1])
+ diff := len(expanded) - len(lines[pe.Position.Line-1])
+
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
+ return b.String()
+}
+
+// ErrorWithUsage returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithUsage() string {
+ m := pe.ErrorWithPosition()
+ if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+ lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
+ for i := range lines {
+ if lines[i] != "" {
+ lines[i] = " " + lines[i]
+ }
+ }
+ return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
+ }
+ return m
+}
+
+func (pe ParseError) column(lines []string) int {
+ var pos, col int
+ for i := range lines {
+ ll := len(lines[i]) + 1 // +1 for the removed newline
+ if pos+ll >= pe.Position.Start {
+ col = pe.Position.Start - pos
+ if col < 0 { // Should never happen, but just in case.
+ col = 0
+ }
+ break
+ }
+ pos += ll
+ }
+
+ return col
+}
+
+func expandTab(s string) string {
+ var (
+ b strings.Builder
+ l int
+ fill = func(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b)
+ }
+ )
+ b.Grow(len(s))
+ for _, r := range s {
+ switch r {
+ case '\t':
+ tw := 8 - l%8
+ b.WriteString(fill(tw))
+ l += tw
+ default:
+ b.WriteRune(r)
+ l += 1
+ }
+ }
+ return b.String()
+}
+
+type (
+ errLexControl struct{ r rune }
+ errLexEscape struct{ r rune }
+ errLexUTF8 struct{ b byte }
+ errParseDate struct{ v string }
+ errLexInlineTableNL struct{}
+ errLexStringNL struct{}
+ errParseRange struct {
+ i any // int or float
+ size string // "int64", "uint16", etc.
+ }
+ errUnsafeFloat struct {
+ i any // float32 or float64
+ size string // "float32" or "float64"
+ }
+ errParseDuration struct{ d string }
+)
+
+func (e errLexControl) Error() string {
+ return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string { return usageEscape }
+func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string { return "" }
+func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
+func (e errParseDate) Usage() string { return usageDate }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string { return usageStringNewline }
+func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
+func (e errParseRange) Usage() string { return usageIntOverflow }
+func (e errUnsafeFloat) Error() string {
+ return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
+}
+func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
+
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+ table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+ # INVALID
+ table = {
+ key = 42,
+ second = 43
+ }
+
+Use regular tables for this:
+
+ [table]
+ key = 42
+ second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+ # INVALID
+ string = "Hello,
+ world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+ string = """Hello,
+ world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be a
+bug in the program that uses too small an integer type.
+
+The maximum and minimum values are:
+
+ size │ lowest │ highest
+ ───────┼────────────────┼──────────────
+ int8 │ -128 │ 127
+ int16 │ -32,768 │ 32,767
+ int32 │ -2,147,483,648 │ 2,147,483,647
+ int64 │ -9.2 × 10¹⁸ │ 9.2 × 10¹⁸
+ uint8 │ 0 │ 255
+ uint16 │ 0 │ 65,535
+ uint32 │ 0 │ 4,294,967,295
+ uint64 │ 0 │ 1.8 × 10¹⁹
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageUnsafeFloat = `
+This number is outside of the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the below range cannot always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+ f = 2_000_000_000.0
+
+Accuracy ranges:
+
+ float32 = 16,777,215
+ float64 = 9,007,199,254,740,991
+`
+
+const usageDuration = `
+A duration must be given as "number<unit>", without any spaces. Valid units are:
+
+ ns nanoseconds (billionth of a second)
+ us, µs microseconds (millionth of a second)
+ ms milliseconds (thousandth of a second)
+ s seconds
+ m minutes
+ h hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+ 2006-01-02T15:04:05Z07:00 Date and time, with timezone.
+ 2006-01-02T15:04:05 Date and time, but without timezone.
+ 2006-01-02 Date without a time or timezone.
+ 15:04:05 Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+ 15:04:05.123
+ 15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+// 2006-01-02T15:04Z07:00
+// 2006-01-02T15:04
+// 15:04
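
ParseError is the concrete type Decode returns for syntax errors; combining errors.As with ErrorWithPosition reproduces the annotated output shown in the doc comment above. A sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	doc := "fruit = []\n\n[[fruit]] # Not allowed\n"

	var v map[string]any
	_, err := toml.Decode(doc, &v)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the offending line with a ^^^^^ marker underneath.
		fmt.Println(perr.ErrorWithPosition())
	}
}
```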
diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod
new file mode 100644
index 000000000..57b1d3738
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/BurntSushi/toml
+
+go 1.18
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referenced from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+ localOffset = func() int { _, o := time.Now().Zone(); return o }()
+ LocalDatetime = time.FixedZone("datetime-local", localOffset)
+ LocalDate = time.FixedZone("date-local", localOffset)
+ LocalTime = time.FixedZone("time-local", localOffset)
+)
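
These sentinel zones are how local (timezone-less) TOML values round-trip: a decoded local date, for example, carries the "date-local" location name. A sketch of observing that through the public API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Born time.Time `toml:"born"`
	}
	// A date with no time or timezone decodes into a time.Time whose
	// Location is the "date-local" sentinel defined above.
	if _, err := toml.Decode(`born = 1987-07-05`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Born.Location()) // date-local
}
```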
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index e0a742a88..a1016d98a 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "reflect"
+ "runtime"
"strings"
"unicode"
"unicode/utf8"
@@ -15,6 +17,7 @@ const (
itemEOF
itemText
itemString
+ itemStringEsc
itemRawString
itemMultilineString
itemRawMultilineString
@@ -29,61 +32,50 @@ const (
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
+ itemKeyEnd
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
+const eof = 0
type stateFn func(lx *lexer) stateFn
+func (p Position) String() string {
+ return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
+}
+
type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
-
- // Allow for backing up up to three runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+ tomlNext bool
+ esc bool
+
+ // Allow for backing up up to 4 runes. This is necessary because TOML
+ // contains 3-rune tokens (""" and ''').
+ prevWidths [4]int
+ nprev int // how many of prevWidths are in use
+ atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again.
// A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
+ //
+ // The idea is to reuse parts of the state machine in various places. For
+ // example, values can appear at the top level or within arbitrarily nested
+ // arrays. The last state on the stack is used after a value has been lexed.
+ // Similarly for comments.
stack []stateFn
}
type item struct {
- typ itemType
- val string
- line int
+ typ itemType
+ val string
+ err error
+ pos Position
}
func (lx *lexer) nextItem() item {
@@ -93,17 +85,19 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
+ //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
-func lex(input string) *lexer {
+func lex(input string, tomlNext bool) *lexer {
lx := &lexer{
- input: input,
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
+ input: input,
+ state: lexTop,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ line: 1,
+ tomlNext: tomlNext,
}
return lx
}
@@ -125,19 +119,36 @@ func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
+func (lx lexer) getPos() Position {
+ p := Position{
+ Line: lx.line,
+ Start: lx.start,
+ Len: lx.pos - lx.start,
+ }
+ if p.Len <= 0 {
+ p.Len = 1
+ }
+ return p
+}
+
func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
+ // Needed for multiline strings ending with an incomplete UTF-8 sequence.
+ if lx.start > lx.pos {
+ lx.error(errLexUTF8{lx.input[lx.pos]})
+ return
+ }
+ lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
- panic("next called after EOF")
+ panic("BUG in lexer: next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
@@ -147,12 +158,25 @@ func (lx *lexer) next() (r rune) {
if lx.input[lx.pos] == '\n' {
lx.line++
}
+ lx.prevWidths[3] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
+ if lx.nprev < 4 {
lx.nprev++
}
+
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ if r == utf8.RuneError && w == 1 {
+ lx.error(errLexUTF8{lx.input[lx.pos]})
+ return utf8.RuneError
+ }
+
+ // Note: don't use peek() here, as peek() itself calls next().
+ if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+ lx.errorControlChar(r)
+ return utf8.RuneError
+ }
+
lx.prevWidths[0] = w
lx.pos += w
return r
@@ -163,19 +187,21 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
-// backup steps back one rune. Can be called only twice between calls to next.
+// backup steps back one rune. Can be called 4 times between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
- panic("backed up too far")
+ panic("BUG in lexer: backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
+ lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev--
+
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
@@ -211,18 +237,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
}
}
-// errorf stops all lexing by emitting an error and returning `nil`.
+// error stops all lexing by emitting an error and returning `nil`.
+//
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
+func (lx *lexer) error(err error) stateFn {
+ if lx.atEOF {
+ return lx.errorPrevLine(err)
}
+ lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+ return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+ pos := lx.getPos()
+ pos.Line--
+ pos.Len = 1
+ pos.Start = lx.pos - 1
+ lx.items <- item{typ: itemError, pos: pos, err: err}
+ return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+ pos := lx.getPos()
+ pos.Start = start
+ pos.Len = length
+ lx.items <- item{typ: itemError, pos: pos, err: err}
return nil
}
+// errorf is like error, and creates a new error.
+func (lx *lexer) errorf(format string, values ...any) stateFn {
+ if lx.atEOF {
+ pos := lx.getPos()
+ pos.Line--
+ pos.Len = 1
+ pos.Start = lx.pos - 1
+ lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
+ return nil
+ }
+ lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
+ return nil
+}
+
+func (lx *lexer) errorControlChar(cc rune) stateFn {
+ return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
+}
+
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
@@ -230,10 +296,10 @@ func lexTop(lx *lexer) stateFn {
return lexSkip(lx, lexTop)
}
switch r {
- case commentStart:
+ case '#':
lx.push(lexTop)
return lexCommentStart
- case tableStart:
+ case '[':
return lexTableStart
case eof:
if lx.pos > lx.start {
@@ -256,7 +322,7 @@ func lexTop(lx *lexer) stateFn {
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
- case r == commentStart:
+ case r == '#':
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
@@ -269,8 +335,7 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
+ return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -279,7 +344,7 @@ func lexTopEnd(lx *lexer) stateFn {
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
+ if lx.peek() == '[' {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
@@ -296,9 +361,8 @@ func lexTableEnd(lx *lexer) stateFn {
}
func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
+ if r := lx.next(); r != ']' {
+ return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@@ -307,31 +371,18 @@ func lexArrayTableEnd(lx *lexer) stateFn {
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
- case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
- case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
+ case r == ']' || r == eof:
+ return lx.errorf("unexpected end of table name (table names cannot be empty)")
+ case r == '.':
+ return lx.errorf("unexpected table separator (table names cannot be empty)")
+ case r == '"' || r == '\'':
lx.ignore()
lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
+ return lexQuotedName
default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
+ lx.push(lexTableNameEnd)
+ return lexBareName
}
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
@@ -341,69 +392,107 @@ func lexTableNameEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
- case r == tableSep:
+ case r == '.':
lx.ignore()
return lexTableNameStart
- case r == tableEnd:
+ case r == ']':
return lx.pop()
default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
}
}
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r, lx.tomlNext) {
+ return lexBareName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table name.
+//
+// It assumes the opening quote has not yet been read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+ r := lx.next()
switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case r == '"':
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == '\'':
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected value")
default:
+ return lx.errorf("expected value but found %q instead", r)
+ }
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '=': key name appears blank")
+ case r == '.':
+ return lx.errorf("unexpected '.': keys cannot start with a '.'")
+ case r == '"' || r == '\'':
lx.ignore()
+ fallthrough
+ default: // Bare key
lx.emit(itemKeyStart)
- return lexBareKey
+ return lexKeyNameStart
}
}
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
+func lexKeyNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '='")
+ case r == '.':
+ return lx.errorf("unexpected '.'")
+ case r == '"' || r == '\'':
+ lx.ignore()
+ lx.push(lexKeyEnd)
+ return lexQuotedName
default:
- return lx.errorf("bare keys cannot contain %q", r)
+ lx.push(lexKeyEnd)
+ return lexBareName
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected key separator '='")
+ case r == '.':
+ lx.ignore()
+ return lexKeyNameStart
+ case r == '=':
+ lx.emit(itemKeyEnd)
+ return lexSkip(lx, lexValue)
default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
+ return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@@ -422,17 +511,17 @@ func lexValue(lx *lexer) stateFn {
return lexNumberOrDateStart
}
switch r {
- case arrayStart:
+ case '[':
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
- case inlineTableStart:
+ case '{':
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
+ case '"':
+ if lx.accept('"') {
+ if lx.accept('"') {
lx.ignore() // Ignore """
return lexMultilineString
}
@@ -440,9 +529,9 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the '"'
return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
+ case '\'':
+ if lx.accept('\'') {
+ if lx.accept('\'') {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
@@ -450,10 +539,15 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the "'"
return lexRawString
- case '+', '-':
- return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
+ case 'i', 'n':
+ if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+ lx.emit(itemFloat)
+ return lx.pop()
+ }
+ case '-', '+':
+ return lexDecimalNumberStart
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
@@ -463,6 +557,9 @@ func lexValue(lx *lexer) stateFn {
lx.backup()
return lexBool
}
+ if r == eof {
+ return lx.errorf("unexpected EOF; expected value")
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -473,14 +570,12 @@ func lexArrayValue(lx *lexer) stateFn {
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
- case r == commentStart:
+ case r == '#':
lx.push(lexArrayValue)
return lexCommentStart
- case r == comma:
+ case r == ',':
return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
+ case r == ']':
return lexArrayEnd
}
@@ -493,23 +588,20 @@ func lexArrayValue(lx *lexer) stateFn {
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
+ switch r := lx.next(); {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
+ case r == '#':
lx.push(lexArrayValueEnd)
return lexCommentStart
- case r == comma:
+ case r == ',':
lx.ignore()
return lexArrayValue // move on to the next value
- case r == arrayEnd:
+ case r == ']':
return lexArrayEnd
+ default:
+ return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
}
- return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
}
// lexArrayEnd finishes the lexing of an array.
@@ -528,13 +620,16 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValue)
+ }
+ return lx.errorPrevLine(errLexInlineTableNL{})
+ case r == '#':
lx.push(lexInlineTableValue)
return lexCommentStart
- case r == comma:
+ case r == ',':
return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
+ case r == '}':
return lexInlineTableEnd
}
lx.backup()
@@ -546,23 +641,39 @@ func lexInlineTableValue(lx *lexer) stateFn {
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
+ switch r := lx.next(); {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValueEnd)
+ }
+ return lx.errorPrevLine(errLexInlineTableNL{})
+ case r == '#':
lx.push(lexInlineTableValueEnd)
return lexCommentStart
- case r == comma:
+ case r == ',':
lx.ignore()
+ lx.skip(isWhitespace)
+ if lx.peek() == '}' {
+ if lx.tomlNext {
+ return lexInlineTableValueEnd
+ }
+ return lx.errorf("trailing comma not allowed in inline tables")
+ }
return lexInlineTableValue
- case r == inlineTableEnd:
+ case r == '}':
return lexInlineTableEnd
+ default:
+ return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
}
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
+}
+
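+// runeOrEOF renders a rune for use in error messages, mapping eof to the more
+// readable "end of file".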
+func runeOrEOF(r rune) string {
+ if r == eof {
+ return "end of file"
+ }
+ return "'" + string(r) + "'"
}
// lexInlineTableEnd finishes the lexing of an inline table.
@@ -579,15 +690,20 @@ func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"'`)
case isNL(r):
- return lx.errorf("strings cannot contain newlines")
+ return lx.errorPrevLine(errLexStringNL{})
case r == '\\':
lx.push(lexString)
return lexStringEscape
- case r == stringEnd:
+ case r == '"':
lx.backup()
- lx.emit(itemString)
+ if lx.esc {
+ lx.esc = false
+ lx.emit(itemStringEsc)
+ } else {
+ lx.emit(itemString)
+ }
lx.next()
lx.ignore()
return lx.pop()
@@ -598,19 +714,48 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
+ default:
+ return lexMultilineString
case eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"""'`)
case '\\':
return lexMultilineStringEscape
- case stringEnd:
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
- lx.backup()
+ case '"':
+ /// Found " → try to read two more "".
+ if lx.accept('"') {
+ if lx.accept('"') {
+ /// Peek ahead: the string can contain " and "", including at the
+ /// end: """str"""""
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == '"' {
+ /// Check if we already lexed five quotes; if so we have six now, and
+ /// that's too many.
+ ///
+ /// Second check is for the edge case:
+ ///
+ /// two quotes allowed.
+ /// vv
+ /// """lol \""""""
+ /// ^^ ^^^---- closing three
+ /// escaped
+ ///
+ /// Ugly, but it works.
+ if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
+ return lx.errorf(`unexpected '""""""'`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineString
+ }
+
+ lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
+ lx.esc = false
lx.emit(itemMultilineString)
- lx.next()
+ lx.next() /// Read over """ again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -618,8 +763,8 @@ func lexMultilineString(lx *lexer) stateFn {
}
lx.backup()
}
+ return lexMultilineString
}
- return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
@@ -627,35 +772,54 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
+ default:
+ return lexRawString
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected "'"`)
case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
+ return lx.errorPrevLine(errLexStringNL{})
+ case r == '\'':
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
- return lexRawString
}
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
+ default:
+ return lexMultilineRawString
case eof:
- return lx.errorf("unexpected EOF")
- case rawStringEnd:
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
- lx.backup()
+ return lx.errorf(`unexpected EOF; expected "'''"`)
+ case '\'':
+ /// Found ' → try to read two more ''.
+ if lx.accept('\'') {
+ if lx.accept('\'') {
+ /// Peek ahead: the string can contain ' and '', including at the
+ /// end: '''str'''''
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == '\'' {
+ /// Check if we already lexed five quotes; if so we have six now, and
+ /// that's too many.
+ if strings.HasSuffix(lx.current(), "'''''") {
+ return lx.errorf(`unexpected "''''''"`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineRawString
+ }
+
+ lx.backup() /// backup: don't include the ''' in the item.
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
- lx.next()
+ lx.next() /// Read over ''' again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -663,15 +827,14 @@ func lexMultilineRawString(lx *lexer) stateFn {
}
lx.backup()
}
+ return lexMultilineRawString
}
- return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
+ if isNL(lx.next()) { /// \ escaping newline.
return lexMultilineString
}
lx.backup()
@@ -680,8 +843,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
}
func lexStringEscape(lx *lexer) stateFn {
+ lx.esc = true
r := lx.next()
switch r {
+ case 'e':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ fallthrough
case 'b':
fallthrough
case 't':
@@ -694,25 +863,42 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '"':
fallthrough
+ case ' ', '\t':
+ // Inside """ .. """ strings you can use \ to escape newlines, and any
+ // amount of whitespace can be between the \ and \n.
+ fallthrough
case '\\':
return lx.pop()
+ case 'x':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ return lexHexEscape
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+ return lx.error(errLexEscape{r})
+}
+
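+// lexHexEscape consumes the two hex digits of a \xHH escape; this escape is
+// only valid with tomlNext (TOML 1.1), which lexStringEscape checks before
+// dispatching here.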
+func lexHexEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 2; i++ {
+ r = lx.next()
+ if !isHex(r) {
+ return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
+ }
+ }
+ return lx.pop()
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -722,29 +908,32 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
}
}
return lx.pop()
}
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
+ case '0':
+ return lexBaseNumberOrDate
}
- return lx.errorf("expected a digit but got %q", r)
+
+ if !isDigit(r) {
+ // The only way to reach this state is if the value starts
+ // with a digit, so specifically treat anything else as an
+ // error.
+ return lx.errorf("expected a digit but got %q", r)
+ }
+
+ return lexNumberOrDate
}
// lexNumberOrDate consumes either an integer, float or datetime.
@@ -754,10 +943,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lexNumberOrDate
}
switch r {
- case '-':
+ case '-', ':':
return lexDatetime
case '_':
- return lexNumber
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
}
@@ -775,41 +964,156 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime
}
switch r {
- case '-', 'T', ':', '.', 'Z', '+':
+ case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
return lexDatetime
}
lx.backup()
- lx.emit(itemDatetime)
+ lx.emitTrim(itemDatetime)
return lx.pop()
}
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
+ if isHex(r) {
+ return lexHexInteger
+ }
+ switch r {
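+ // Underscores are consumed here; their placement is validated later, by
+ // numUnderscoresOK in the parser.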
+ case '_':
+ return lexHexInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isOctal(r) {
+ return lexOctalInteger
+ }
+ switch r {
+ case '_':
+ return lexOctalInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isBinary(r) {
+ return lexBinaryInteger
+ }
+ switch r {
+ case '_':
+ return lexBinaryInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDecimalNumber
+ }
+ switch r {
+ case '.', 'e', 'E':
+ return lexFloat
+ case '_':
+ return lexDecimalNumber
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a
+// sign. It assumes the sign has already been consumed. Values which start with
+// a sign are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+ r := lx.next()
+
+ // Special error cases to give users better error messages
+ switch r {
+ case 'i':
+ if !lx.accept('n') || !lx.accept('f') {
+ return lx.errorf("invalid float: '%s'", lx.current())
}
- return lx.errorf("expected a digit but got %q", r)
+ lx.emit(itemFloat)
+ return lx.pop()
+ case 'n':
+ if !lx.accept('a') || !lx.accept('n') {
+ return lx.errorf("invalid float: '%s'", lx.current())
+ }
+ lx.emit(itemFloat)
+ return lx.pop()
+ case '0':
+ p := lx.peek()
+ switch p {
+ case 'b', 'o', 'x':
+ return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+ }
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+
+ if isDigit(r) {
+ return lexDecimalNumber
}
- return lexNumber
+
+ return lx.errorf("expected a digit but got %q", r)
}
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
r := lx.next()
+ // Note: All datetimes start with at least two digits, so we don't
+ // handle date characters (':', '-', etc.) here.
if isDigit(r) {
- return lexNumber
+ return lexNumberOrDate
}
switch r {
case '_':
- return lexNumber
+ // Can only be decimal, because there can't be an underscore
+ // between the '0' and the base designator, and dates can't
+ // contain underscores.
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
+ case 'b':
+ r = lx.peek()
+ if !isBinary(r) {
+ lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+ }
+ return lexBinaryInteger
+ case 'o':
+ r = lx.peek()
+ if !isOctal(r) {
+ lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+ }
+ return lexOctalInteger
+ case 'x':
+ r = lx.peek()
+ if !isHex(r) {
+ lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ }
+ return lexHexInteger
}
lx.backup()
@@ -867,49 +1171,31 @@ func lexCommentStart(lx *lexer) stateFn {
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
+ switch r := lx.next(); {
+ case isNL(r) || r == eof:
+ lx.backup()
lx.emit(itemText)
return lx.pop()
+ default:
+ return lexComment
}
- lx.next()
- return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
+ lx.ignore()
+ return nextState
}
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
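+// String returns the name of a state function, which is mainly useful with
+// the commented-out debug print in nextItem().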
+func (s stateFn) String() string {
+ name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+ if i := strings.LastIndexByte(name, '.'); i > -1 {
+ name = name[i+1:]
+ }
+ if s == nil {
+ name = ""
+ }
+ return name + "()"
}
func (itype itemType) String() string {
@@ -922,7 +1208,7 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
@@ -938,16 +1224,58 @@ func (itype itemType) String() string {
return "TableEnd"
case itemKeyStart:
return "KeyStart"
+ case itemKeyEnd:
+ return "KeyEnd"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
+ case itemInlineTableStart:
+ return "InlineTableStart"
+ case itemInlineTableEnd:
+ return "InlineTableEnd"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+ return fmt.Sprintf("(%s, %s)", item.typ, item.val)
+}
+
+func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
+func isNL(r rune) bool { return r == '\n' || r == '\r' }
+func isControl(r rune) bool { // Control characters except \t, \r, \n
+ switch r {
+ case '\t', '\r', '\n':
+ return false
+ default:
+ return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+ }
+}
+func isDigit(r rune) bool { return r >= '0' && r <= '9' }
+func isBinary(r rune) bool { return r == '0' || r == '1' }
+func isOctal(r rune) bool { return r >= '0' && r <= '7' }
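+// isHex folds ASCII letters to lower case with r|0x20, so that 'A'-'F' and
+// 'a'-'f' both match.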
+func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
+func isBareKeyChar(r rune, tomlNext bool) bool {
+ if tomlNext {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' || r == '-' ||
+ r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
+ (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
+ (r >= 0x037f && r <= 0x1fff) ||
+ (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
+ (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
+ (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
+ (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
+ (r >= 0x10000 && r <= 0xeffff)
+ }
+
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' || r == '-'
}
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
new file mode 100644
index 000000000..e61453730
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -0,0 +1,148 @@
+package toml
+
+import (
+ "strings"
+)
+
+// MetaData allows access to meta information about TOML data that's not
+// accessible otherwise.
+//
+// It allows checking if a key is defined in the TOML data, whether any keys
+// were undecoded, and the TOML type of a key.
+type MetaData struct {
+ context Key // Used only during decoding.
+
+ keyInfo map[string]keyInfo
+ mapping map[string]any
+ keys []Key
+ decoded map[string]struct{}
+ data []byte // Input file; for errors.
+}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
+//
+// Returns false for an empty key.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var (
+ hash map[string]any
+ ok bool
+ hashOrVal any = md.mapping
+ )
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]any); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ if ki, ok := md.keyInfo[Key(key).String()]; ok {
+ return ki.tomlType.typeString()
+ }
+ return ""
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+//
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a [Primitive] value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if _, ok := md.decoded[key.String()]; !ok {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
+
+// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string {
+ // This is called quite often, so it's written a bit oddly to make it faster.
+ var b strings.Builder
+ b.Grow(len(k) * 25)
+outer:
+ for i, kk := range k {
+ if i > 0 {
+ b.WriteByte('.')
+ }
+ if kk == "" {
+ b.WriteString(`""`)
+ } else {
+ for _, r := range kk {
+ // "Inline" isBareKeyChar
+ if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
+ b.WriteByte('"')
+ b.WriteString(dblQuotedReplacer.Replace(kk))
+ b.WriteByte('"')
+ continue outer
+ }
+ }
+ b.WriteString(kk)
+ }
+ }
+ return b.String()
+}
+
+func (k Key) maybeQuoted(i int) string {
+ if k[i] == "" {
+ return `""`
+ }
+ for _, r := range k[i] {
+ if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
+ continue
+ }
+ return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+ }
+ return k[i]
+}
+
+// Like append(), but only increase the cap by 1.
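+// Keys are long-lived (p.ordered and MetaData hold them), so the smaller
+// allocation is usually worth the extra copy.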
+func (k Key) add(piece string) Key {
+ if cap(k) > len(k) {
+ return append(k, piece)
+ }
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
+func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 50869ef92..11ac3108b 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,57 +2,82 @@ package toml
import (
"fmt"
+ "math"
+ "os"
"strconv"
"strings"
"time"
- "unicode"
"unicode/utf8"
+
+ "github.com/BurntSushi/toml/internal"
)
type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
+ lx *lexer
+ context Key // Full key for the current hash in scope.
+ currentKey string // Base key name for everything except hashes.
+ pos Position // Current position in the TOML file.
+ tomlNext bool
- // the base key name for everything except hashes
- currentKey string
+ ordered []Key // List of keys in the order that they appear in the TOML data.
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
+ keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
+ mapping map[string]any // Map keyname → key value.
+ implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
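+// keyInfo records the position and TOML type of each defined key.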
+type keyInfo struct {
+ pos Position
+ tomlType tomlType
}
func parse(data string) (p *parser, err error) {
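+ // Setting the BURNTSUSHI_TOML_110 environment variable opts in to the
+ // TOML 1.1 (tomlNext) behaviour.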
+ _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
+
defer func() {
if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
+ if pErr, ok := r.(ParseError); ok {
+ pErr.input = data
+ err = pErr
return
}
panic(r)
}
}()
+ // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+ // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+ // it anyway.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
+ data = data[2:]
+ //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
+ } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+ data = data[3:]
+ }
+
+ // Examine first few bytes for NULL bytes; this probably means it's a UTF-16
+ // file (second byte in surrogate pair being NULL). Again, do this here to
+ // avoid having to deal with UTF-8/16 stuff in the lexer.
+ ex := 6
+ if len(data) < 6 {
+ ex = len(data)
+ }
+ if i := strings.IndexRune(data[:ex], 0); i > -1 {
+ return nil, ParseError{
+ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
+ Position: Position{Line: 1, Start: i, Len: 1},
+ Line: 1,
+ input: data,
+ }
+ }
+
p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
+ keyInfo: make(map[string]keyInfo),
+ mapping: make(map[string]any),
+ lx: lex(data, tomlNext),
ordered: make([]Key, 0),
- implicits: make(map[string]bool),
+ implicits: make(map[string]struct{}),
+ tomlNext: tomlNext,
}
for {
item := p.next()
@@ -65,21 +90,58 @@ func parse(data string) (p *parser, err error) {
return p, nil
}
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
+func (p *parser) panicErr(it item, err error) {
+ panic(ParseError{
+ err: err,
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ })
+}
+
+func (p *parser) panicItemf(it item, format string, v ...any) {
+ panic(ParseError{
+ Message: fmt.Sprintf(format, v...),
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ })
+}
+
+func (p *parser) panicf(format string, v ...any) {
+ panic(ParseError{
+ Message: fmt.Sprintf(format, v...),
+ Position: p.pos,
+ Line: p.pos.Line,
+ LastKey: p.current(),
+ })
}
func (p *parser) next() item {
it := p.lx.nextItem()
+ //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
if it.typ == itemError {
- p.panicf("%s", it.val)
+ if it.err != nil {
+ panic(ParseError{
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ err: it.err,
+ })
+ }
+
+ p.panicItemf(it, "%s", it.val)
}
return it
}
-func (p *parser) bug(format string, v ...interface{}) {
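+// nextPos is like next(), but also records the position, which panicf() uses.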
+func (p *parser) nextPos() item {
+ it := p.next()
+ p.pos = it.pos
+ return it
+}
+
+func (p *parser) bug(format string, v ...any) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -97,44 +159,61 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
+ case itemCommentStart: // # ..
p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemTableStart: // [ .. ]
+ name := p.nextPos()
var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemTableEnd, kg.typ)
+ p.assertEqual(itemTableEnd, name.typ)
- p.establishContext(key, false)
- p.setType("", tomlHash)
+ p.addContext(key, false)
+ p.setType("", tomlHash, item.pos)
p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemArrayTableStart: // [[ .. ]]
+ name := p.nextPos()
var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemArrayTableEnd, kg.typ)
+ p.assertEqual(itemArrayTableEnd, name.typ)
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
+ p.addContext(key, true)
+ p.setType("", tomlArrayHash, item.pos)
p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
+ case itemKeyStart: // key = ..
+ outerContext := p.context
+ /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
+ k := p.nextPos()
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
+ /// The current key is the last part.
+ p.currentKey = key.last()
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key.parent()
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
p.ordered = append(p.ordered, p.context.add(p.currentKey))
+
+ /// Set value.
+ vItem := p.next()
+ val, typ := p.value(vItem, false)
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, vItem.pos)
+
+ /// Remove the context we added (preserving any context from [tbl] lines).
+ p.context = outerContext
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
@@ -146,182 +225,311 @@ func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
- case itemString, itemMultilineString,
+ case itemString, itemStringEsc, itemMultilineString,
itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
+ s, _ := p.value(it, false)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
}
+ panic("unreachable")
}
+var datetimeRepl = strings.NewReplacer(
+ "z", "Z",
+ "t", "T",
+ " ", "T")
+
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
switch it.typ {
case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ return it.val, p.typeOfPrimitive(it)
+ case itemStringEsc:
+ return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemInteger:
+ return p.valueInteger(it)
+ case itemFloat:
+ return p.valueFloat(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
+ default:
+ p.bug("Expected boolean value, but got '%s'.", it.val)
}
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
+ case itemDatetime:
+ return p.valueDatetime(it)
+ case itemArray:
+ return p.valueArray(it)
+ case itemInlineTableStart:
+ return p.valueInlineTable(it, parentIsArray)
+ default:
+ p.bug("Unexpected value type: %s", it.typ)
+ }
+ panic("unreachable")
+}
+
+func (p *parser) valueInteger(it item) (any, tomlType) {
+ if !numUnderscoresOK(it.val) {
+ p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
+ }
+ if numHasLeadingZero(it.val) {
+ p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
+ }
+
+ num, err := strconv.ParseInt(it.val, 0, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicErr(it, errParseRange{i: it.val, size: "int64"})
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
}
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueFloat(it item) (any, tomlType) {
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
+ }
+ }
+ if len(parts) > 0 && numHasLeadingZero(parts[0]) {
+ p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ signbit := false
+ if val == "+nan" || val == "-nan" {
+ signbit = val == "-nan"
+ val = "nan"
+ }
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicErr(it, errParseRange{i: it.val, size: "float64"})
+ } else {
+ p.panicItemf(it, "Invalid float value: %q", it.val)
+ }
+ }
+ if signbit {
+ num = math.Copysign(num, -1)
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
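+// dtTypes lists the datetime layouts to try, in order; entries with next set
+// are only attempted when tomlNext is enabled.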
+var dtTypes = []struct {
+ fmt string
+ zone *time.Location
+ next bool
+}{
+ {time.RFC3339Nano, time.Local, false},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
+ {"2006-01-02", internal.LocalDate, false},
+ {"15:04:05.999999999", internal.LocalTime, false},
+
+ // tomlNext
+ {"2006-01-02T15:04Z07:00", time.Local, true},
+ {"2006-01-02T15:04", internal.LocalDatetime, true},
+ {"15:04", internal.LocalTime, true},
+}
+
+func (p *parser) valueDatetime(it item) (any, tomlType) {
+ it.val = datetimeRepl.Replace(it.val)
+ var (
+ t time.Time
+ ok bool
+ err error
+ )
+ for _, dt := range dtTypes {
+ if dt.next && !p.tomlNext {
+ continue
+ }
+ t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
+ if err == nil {
+ if missingLeadingZero(it.val, dt.fmt) {
+ p.panicErr(it, errParseDate{it.val})
}
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicErr(it, errParseDate{it.val})
+ }
+ return t, p.typeOfPrimitive(it)
+}
+
+// Go's time.Parse() will accept numbers without a leading zero; there isn't any
+// way to require it. https://github.com/golang/go/issues/29911
+//
+// Depend on the fact that the separators (- and :) should always be at the same
+// location.
+func missingLeadingZero(d, l string) bool {
+ for i, c := range []byte(l) {
+ if c == '.' || c == 'Z' {
return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
}
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
+ if (c < '0' || c > '9') && d[i] != c {
+ return true
}
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
+ }
+ return false
+}
+
+func (p *parser) valueArray(it item) (any, tomlType) {
+ p.setType(p.currentKey, tomlArray, it.pos)
+
+ var (
+ // Initialize to a non-nil slice to make it consistent with how S = []
+ // decodes into a non-nil slice inside something like struct{ S []string }.
+ // See #338
+ array = make([]any, 0, 2)
+ )
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
}
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
+
+ val, typ := p.value(it, true)
+ array = append(array, val)
+
+ // XXX: type isn't used here, we need it to record the accurate type
+ // information.
+ //
+ // Not entirely sure how to best store this; could use "key[0]",
+ // "key[1]" notation, or maybe store it on the Array type?
+ _ = typ
+ }
+ return array, tomlArray
+}
+
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
+ var (
+ topHash = make(map[string]any)
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ prevContext := p.context
+ p.currentKey = ""
+
+ p.addImplicit(p.context)
+ p.addContext(p.context, parentIsArray)
+
+ /// Loop over all table key/value pairs.
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
}
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
+ /// Read all key parts.
+ k := p.nextPos()
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key.last()
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key.parent()
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
}
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
+ /// Set the value.
+ val, typ := p.value(p.next(), false)
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, it.pos)
+
+ hash := topHash
+ for _, c := range context {
+ h, ok := hash[c]
+ if !ok {
+ h = make(map[string]any)
+ hash[c] = h
}
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
+ hash, ok = h.(map[string]any)
+ if !ok {
+ p.panicf("%q is not a table", p.context)
}
+ }
+ hash[p.currentKey] = val
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
+ /// Restore context.
+ p.context = prevContext
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return topHash, tomlHash
+}
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
- }
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+ if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+ return true
}
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
+ if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+ return true
+ }
+ return false
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
+ switch s {
+ case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+ return true
+ }
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
- accept = false
- continue
}
- accept = true
+
+ // isHex is a superset of all the permissible characters surrounding an
+ // underscore.
+ accept = isHex(r)
}
return accept
}
@@ -338,28 +546,25 @@ func numPeriodsOK(s string) bool {
return !period
}
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
+func (p *parser) addContext(key Key, array bool) {
+ /// Always start at the top level and drill down for our context.
hashContext := p.mapping
- keyContext := make(Key, 0)
+ keyContext := make(Key, 0, len(key)-1)
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
+ /// We only need implicit hashes for the parents.
+ for _, k := range key.parent() {
+ _, ok := hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
+ hashContext[k] = make(map[string]any)
}
// If the hash context is actually an array of tables, then set
@@ -368,9 +573,9 @@ func (p *parser) establishContext(key Key, array bool) {
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
- case []map[string]interface{}:
+ case []map[string]any:
hashContext = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
@@ -381,109 +586,106 @@ func (p *parser) establishContext(key Key, array bool) {
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
- k := key[len(key)-1]
+ k := key.last()
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
+ hashContext[k] = make([]map[string]any, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
+ if hash, ok := hashContext[k].([]map[string]any); ok {
+ hashContext[k] = append(hash, make(map[string]any))
} else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
+ p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
+ p.setValue(key.last(), make(map[string]any))
}
- p.context = append(p.context, key[len(key)-1])
+ p.context = append(p.context, key.last())
}
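As a rough illustration of the implicit hashes addContext creates, a dotted table header materializes every intermediate map; a sketch against the public API, assuming the default `map[string]any` decoding:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]any
	if _, err := toml.Decode("[a.b.c]\nd = 1", &m); err != nil {
		panic(err)
	}
	// "a" and "b" were never written explicitly; they are the implicit
	// hashes created while drilling down to "c".
	a := m["a"].(map[string]any)
	b := a["b"].(map[string]any)
	fmt.Println(b["c"].(map[string]any)["d"]) // 1
}
```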
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, and will
// account for implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
+func (p *parser) setValue(key string, value any) {
+ var (
+ tmpHash any
+ ok bool
+ hash = p.mapping
+ keyContext = make(Key, 0, len(p.context)+1)
+ )
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
- case []map[string]interface{}:
+ case []map[string]any:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hash = t
default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
+ p.panicf("Key '%s' has already been defined.", keyContext)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ // Normally redefining keys isn't allowed, but the key could have been
+ // defined implicitly and it's allowed to be redefined concretely. (See
+ // the `valid/implicit-and-explicit-after.toml` in toml-test)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
+ if p.isArray(keyContext) {
+ p.removeImplicit(keyContext)
+ hash[key] = value
+ return
+ }
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
+ // Otherwise, we have a concrete key trying to override a previous key,
+ // which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
+
hash[key] = value
}
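The implicit/explicit interplay described above is easiest to see from the outside; a short sketch of both the allowed redefinition and the always-fatal one:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]any

	// "a" is created implicitly by [a.b]; defining [a] afterwards is legal
	// exactly once (the implicit marker is removed the first time).
	if _, err := toml.Decode("[a.b]\nc = 1\n[a]\nd = 2", &m); err != nil {
		panic(err)
	}

	// A concrete key can never be overridden.
	if _, err := toml.Decode("x = 1\nx = 2", &m); err != nil {
		fmt.Println(err) // Key 'x' has already been defined.
	}
}
```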
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
+// setType sets the type of a particular value at a given key. It should be
+// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
+func (p *parser) setType(key string, typ tomlType, pos Position) {
keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
+ keyContext = append(keyContext, p.context...)
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
+ // Special case to make empty keys ("" = 1) work.
+ // Without it, it will set "" rather than `""`.
+ // TODO: why is this needed? And why is this only needed here?
+ if len(keyContext) == 0 {
+ keyContext = Key{""}
+ }
+ p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
// current returns the full key name of the current context.
func (p *parser) current() string {
@@ -497,96 +699,146 @@ func (p *parser) current() string {
}
func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
+ if len(s) > 0 && s[0] == '\n' {
+ return s[1:]
+ }
+ if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+ return s[2:]
}
- return s[1:]
+ return s
}
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+// stripEscapedNewlines removes whitespace after line-ending backslashes in
+// multiline strings.
+//
+// A line-ending backslash is an unescaped \ followed only by whitespace until
+// the next newline. After a line-ending backslash, all whitespace is removed
+// until the next non-whitespace character.
+func (p *parser) stripEscapedNewlines(s string) string {
+ var (
+ b strings.Builder
+ i int
+ )
+ b.Grow(len(s))
+ for {
+ ix := strings.Index(s[i:], `\`)
+ if ix < 0 {
+ b.WriteString(s)
+ return b.String()
+ }
+ i += ix
+
+ if len(s) > i+1 && s[i+1] == '\\' {
+ // Escaped backslash.
+ i += 2
+ continue
}
+ // Scan until the next non-whitespace.
+ j := i + 1
+ whitespaceLoop:
+ for ; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\r', '\n':
+ default:
+ break whitespaceLoop
+ }
+ }
+ if j == i+1 {
+ // Not a whitespace escape.
+ i++
+ continue
+ }
+ if !strings.Contains(s[i:j], "\n") {
+ // This is not a line-ending backslash. (It's a bad escape sequence,
+ // but we can let replaceEscapes catch it.)
+ i++
+ continue
+ }
+ b.WriteString(s[:i])
+ s = s[j:]
+ i = 0
}
- return strings.Join(esc, "")
}
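The behavior being implemented here is the TOML "line-ending backslash" rule for basic multiline strings; a small sketch of the observable effect:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }
	// The trailing backslash swallows the newline and all whitespace up to
	// the next non-whitespace character.
	doc := "S = \"\"\"\nfoo \\\n      bar\"\"\""
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "foo bar"
}
```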
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
+func (p *parser) replaceEscapes(it item, str string) string {
+ var (
+ b strings.Builder
+ skip = 0
+ )
+ b.Grow(len(str))
+ for i, c := range str {
+ if skip > 0 {
+ skip--
+ continue
+ }
+ if c != '\\' {
+ b.WriteRune(c)
continue
}
- r += 1
- if r >= len(s) {
+
+ if i >= len(str) {
p.bug("Escape sequence at end of string.")
return ""
}
- switch s[r] {
+ switch str[i+1] {
default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
+ p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
+ case ' ', '\t':
+ p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
+ b.WriteByte(0x08)
+ skip = 1
case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
+ b.WriteByte(0x09)
+ skip = 1
case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
+ b.WriteByte(0x0a)
+ skip = 1
case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
+ b.WriteByte(0x0c)
+ skip = 1
case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
+ b.WriteByte(0x0d)
+ skip = 1
+ case 'e':
+ if p.tomlNext {
+ b.WriteByte(0x1b)
+ skip = 1
+ }
case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
+ b.WriteByte(0x22)
+ skip = 1
case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
+ b.WriteByte(0x5c)
+ skip = 1
+ // The lexer guarantees the correct number of characters are present;
+ // don't need to check here.
+ case 'x':
+ if p.tomlNext {
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
+ b.WriteRune(escaped)
+ skip = 3
+ }
case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
- replaced = append(replaced, escaped)
- r += 5
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
+ b.WriteRune(escaped)
+ skip = 5
case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
- replaced = append(replaced, escaped)
- r += 9
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
+ b.WriteRune(escaped)
+ skip = 9
}
}
- return string(replaced)
+ return b.String()
}
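For reference, the escape forms handled above as they appear from the decoding side (`\e` and `\x` are gated behind `tomlNext`, so this sketch sticks to the TOML 1.0 escapes):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }
	if _, err := toml.Decode(`S = "tab:\t caf\u00E9 \U0001F600"`, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "tab:\t café 😀"
}
```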
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
- s := string(bs)
+func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
+ p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be0..000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8afc1..000000000
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemMultilineString:
- return tomlString
- case itemRawString:
- return tomlString
- case itemRawMultilineString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 608997c22..10c51f7ee 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -25,10 +25,8 @@ type field struct {
// breaking ties with index sequence.
type byName []field
-func (x byName) Len() int { return len(x) }
-
+func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
@@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
// byIndex sorts field by index sequence.
type byIndex []field
-func (x byIndex) Len() int { return len(x) }
-
+func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
@@ -70,8 +66,8 @@ func typeFields(t reflect.Type) []field {
next := []field{{typ: t}}
// Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
+ var count map[reflect.Type]int
+ var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go
new file mode 100644
index 000000000..1c090d331
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -0,0 +1,65 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be militating
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsTable(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string { return string(btype) }
+func (btype tomlBaseType) String() string { return btype.typeString() }
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString, itemStringEsc:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
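Notably, the deleted typeOfArray (which enforced homogeneous arrays) has no successor in this file: TOML 1.0 dropped that restriction, so mixed-type arrays now decode without complaint. A quick hedged check:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]any
	// Valid TOML 1.0; the old "arrays must be homogeneous" panic is gone.
	if _, err := toml.Decode(`a = [1, "two", 3.0]`, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"]) // [1 two 3]
}
```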
diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes
new file mode 100644
index 000000000..94f480de9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
index b883f1fdc..815e20660 100644
--- a/vendor/github.com/Microsoft/go-winio/.gitignore
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -1 +1,10 @@
+.vscode/
+
*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml
new file mode 100644
index 000000000..faedfe937
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml
@@ -0,0 +1,147 @@
+linters:
+ enable:
+ # style
+ - containedctx # struct contains a context
+ - dupl # duplicate code
- errname # errors are named correctly
+ - nolintlint # "//nolint" directives are properly explained
+ - revive # golint replacement
+ - unconvert # unnecessary conversions
+ - wastedassign
+
+ # bugs, performance, unused, etc ...
+ - contextcheck # function uses a non-inherited context
+ - errorlint # errors not wrapped for 1.13
+ - exhaustive # check exhaustiveness of enum switch statements
+ - gofmt # files are gofmt'ed
+ - gosec # security
+ - nilerr # returns nil even with non-nil error
+ - thelper # test helpers without t.Helper()
+ - unparam # unused function params
+
+issues:
+ exclude-dirs:
+ - pkg/etw/sample
+
+ exclude-rules:
+ # err is very often shadowed in nested scopes
+ - linters:
+ - govet
+ text: '^shadow: declaration of "err" shadows declaration'
+
+ # ignore long lines for skip autogen directives
+ - linters:
+ - revive
+ text: "^line-length-limit: "
+ source: "^//(go:generate|sys) "
+
+ #TODO: remove after upgrading to go1.18
+ # ignore comment spacing for nolint and sys directives
+ - linters:
+ - revive
+ text: "^comment-spacings: no space between comment delimiter and comment text"
+ source: "//(cspell:|nolint:|sys |todo)"
+
+ # not on go 1.18 yet, so no any
+ - linters:
+ - revive
+ text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
+ # allow unjustified ignores of error checks in defer statements
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errcheck` should provide explanation"
+ source: '^\s*defer '
+
+ # allow unjustified ignores of error lints for io.EOF
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errorlint` should provide explanation"
+ source: '[=|!]= io.EOF'
+
+
+linters-settings:
+ exhaustive:
+ default-signifies-exhaustive: true
+ govet:
+ enable-all: true
+ disable:
+ # struct order is often for Win32 compat
+ # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+ - fieldalignment
+ nolintlint:
+ require-explanation: true
+ require-specific: true
+ revive:
+ # revive is more configurable than static check, so likely the preferred alternative to static-check
+ # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+ enable-all-rules:
+ true
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+ rules:
+ # rules with required arguments
+ - name: argument-limit
+ disabled: true
+ - name: banned-characters
+ disabled: true
+ - name: cognitive-complexity
+ disabled: true
+ - name: cyclomatic
+ disabled: true
+ - name: file-header
+ disabled: true
+ - name: function-length
+ disabled: true
+ - name: function-result-limit
+ disabled: true
+ - name: max-public-structs
+ disabled: true
+ # generally annoying rules
+ - name: add-constant # complains about any and all strings and integers
+ disabled: true
+ - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+ disabled: true
+ - name: flag-parameter # excessive, and a common idiom we use
+ disabled: true
+ - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+ disabled: true
+ # general config
+ - name: line-length-limit
+ arguments:
+ - 140
+ - name: var-naming
+ arguments:
+ - []
+ - - CID
+ - CRI
+ - CTRD
+ - DACL
+ - DLL
+ - DOS
+ - ETW
+ - FSCTL
+ - GCS
+ - GMSA
+ - HCS
+ - HV
+ - IO
+ - LCOW
+ - LDAP
+ - LPAC
+ - LTSC
+ - MMIO
+ - NT
+ - OCI
+ - PMEM
+ - PWSH
+ - RX
- SACL
+ - SID
+ - SMB
+ - TX
+ - VHD
+ - VHDX
+ - VMID
+ - VPCI
+ - WCOW
+ - WIM
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
index 60c93fe50..7474b4f0b 100644
--- a/vendor/github.com/Microsoft/go-winio/README.md
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -11,12 +11,79 @@ package.
Please see the LICENSE file for licensing information.
-This project has adopted the [Microsoft Open Source Code of
-Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
-see the [Code of Conduct
-FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
-questions or comments.
-
-Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
-for another named pipe implementation.
+## Contributing
+
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and the linter can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+ "go.lintTool": "golangci-lint",
+ "go.lintOnSave": "package",
+```
+
+Additional editor [integrations options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
+
+[natefinch]: https://github.com/natefinch
diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md
new file mode 100644
index 000000000..869fdfe2b
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
index 2be34af43..b54341daa 100644
--- a/vendor/github.com/Microsoft/go-winio/backup.go
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -7,15 +8,16 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"runtime"
- "syscall"
"unicode/utf16"
+
+ "github.com/Microsoft/go-winio/internal/fs"
+ "golang.org/x/sys/windows"
)
-//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
-//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
const (
BackupData = uint32(iota + 1)
@@ -24,7 +26,7 @@ const (
BackupAlternateData
BackupLink
BackupPropertyData
- BackupObjectId
+ BackupObjectId //revive:disable-line:var-naming ID, not Id
BackupReparseData
BackupSparseBlock
BackupTxfsData
@@ -34,14 +36,16 @@ const (
StreamSparseAttributes = uint32(8)
)
+//nolint:revive // var-naming: ALL_CAPS
const (
- WRITE_DAC = 0x40000
- WRITE_OWNER = 0x80000
- ACCESS_SYSTEM_SECURITY = 0x1000000
+ WRITE_DAC = windows.WRITE_DAC
+ WRITE_OWNER = windows.WRITE_OWNER
+ ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
+ //revive:disable-next-line:var-naming ID, not Id
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
@@ -49,8 +53,8 @@ type BackupHeader struct {
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
-type win32StreamId struct {
- StreamId uint32
+type win32StreamID struct {
+ StreamID uint32
Attributes uint32
Size uint64
NameSize uint32
@@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
- if r.bytesLeft > 0 {
+ if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
r.bytesLeft = 0
}
}
- if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ if _, err := io.Copy(io.Discard, r); err != nil {
return nil, err
}
}
- var wsi win32StreamId
+ var wsi win32StreamID
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
- Id: wsi.StreamId,
+ Id: wsi.StreamID,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
@@ -100,9 +104,9 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
return nil, err
}
- hdr.Name = syscall.UTF16ToString(name)
+ hdr.Name = windows.UTF16ToString(name)
}
- if wsi.StreamId == BackupSparseBlock {
+ if wsi.StreamID == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
@@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
- wsi := win32StreamId{
- StreamId: hdr.Id,
+ wsi := win32StreamID{
+ StreamID: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
@@ -201,9 +205,9 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
- err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+ err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
- return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+ return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
}
runtime.KeepAlive(r.f)
if bytesRead == 0 {
@@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
- backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0
}
@@ -240,9 +244,9 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
- err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+ err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
- return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+ return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
}
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) {
@@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
- backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0
}
@@ -267,11 +271,14 @@ func (w *BackupFileWriter) Close() error {
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
- winPath, err := syscall.UTF16FromString(path)
- if err != nil {
- return nil, err
- }
- h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
+ h, err := fs.CreateFile(path,
+ fs.AccessMask(access),
+ fs.FileShareMode(share),
+ nil,
+ fs.FileCreationDisposition(createmode),
+ fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+ 0,
+ )
if err != nil {
err = &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
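Putting the pieces of this file together, a hedged sketch of enumerating the backup streams of a file (Windows-only; the path is hypothetical, and reading files you do not own additionally requires SeBackupPrivilege):

```go
//go:build windows

package main

import (
	"fmt"
	"io"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	f, err := winio.OpenForBackup(`C:\some\file.txt`, // hypothetical path
		windows.GENERIC_READ, 0, windows.OPEN_EXISTING)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := winio.NewBackupFileReader(f, true) // true: include the security stream
	defer r.Close()

	br := winio.NewBackupStreamReader(r)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("stream id=%d size=%d name=%q\n", hdr.Id, hdr.Size, hdr.Name)
	}
}
```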
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/doc.go b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go
new file mode 100644
index 000000000..965d52ab0
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go
@@ -0,0 +1,3 @@
+// This file only exists to allow go get on non-Windows platforms.
+
+package backuptar
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
deleted file mode 100644
index d39eccf02..000000000
--- a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// +build !windows
-// This file only exists to allow go get on non-Windows platforms.
-
-package backuptar
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
index 341609663..455fd798e 100644
--- a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package backuptar
import (
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
index cb461ca31..7f852bbf8 100644
--- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package backuptar
@@ -5,31 +6,29 @@ package backuptar
import (
"archive/tar"
"encoding/base64"
- "errors"
"fmt"
"io"
- "io/ioutil"
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
)
+//nolint:deadcode,varcheck // keep unused constants for potential future use
const (
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
- c_ISDIR = 040000 // Directory
- c_ISFIFO = 010000 // FIFO
- c_ISREG = 0100000 // Regular file
- c_ISLNK = 0120000 // Symbolic link
- c_ISBLK = 060000 // Block special file
- c_ISCHR = 020000 // Character special file
- c_ISSOCK = 0140000 // Socket
+ cISUID = 0004000 // Set uid
+ cISGID = 0002000 // Set gid
+ cISVTX = 0001000 // Save text (sticky bit)
+ cISDIR = 0040000 // Directory
+ cISFIFO = 0010000 // FIFO
+ cISREG = 0100000 // Regular file
+ cISLNK = 0120000 // Symbolic link
+ cISBLK = 0060000 // Block special file
+ cISCHR = 0020000 // Character special file
+ cISSOCK = 0140000 // Socket
)
const (
@@ -42,26 +41,21 @@ const (
hdrCreationTime = "LIBARCHIVE.creationtime"
)
-func writeZeroes(w io.Writer, count int64) error {
- buf := make([]byte, 8192)
- c := len(buf)
- for i := int64(0); i < count; i += int64(c) {
- if int64(c) > count-i {
- c = int(count - i)
- }
- _, err := w.Write(buf[:c])
- if err != nil {
- return err
- }
+// zeroReader is an io.Reader that always returns 0s.
+type zeroReader struct{}
+
+func (zeroReader) Read(b []byte) (int, error) {
+ for i := range b {
+ b[i] = 0
}
- return nil
+ return len(b), nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
err = io.ErrUnexpectedEOF
}
if err != nil {
@@ -71,16 +65,26 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
+ // We can't seek backwards, since we have already written that data to the tar.Writer.
+ if bhdr.Offset < curOffset {
+ return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset)
+ }
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
- err = writeZeroes(t, bhdr.Offset-curOffset)
+ if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
+ return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err)
+ }
if bhdr.Size == 0 {
+ // A sparse block with size = 0 is used to mark the end of the sparse blocks.
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
+ if n != bhdr.Size {
+ return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset)
+ }
curOffset = bhdr.Offset + n
}
return nil
@@ -101,25 +105,85 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
- if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
- hdr.Mode |= c_ISDIR
+ if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+ hdr.Mode |= cISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
return hdr
}
+// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
+// from the tar header and returns the security descriptor into a byte slice.
+func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
+ if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
+ sd, err := base64.StdEncoding.DecodeString(sdraw)
+ if err != nil {
+ // Not returning sd as-is in the error-case, as base64.DecodeString
+ // may return partially decoded data (not nil or empty slice) in case
+ // of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387
+ return nil, err
+ }
+ return sd, nil
+ }
+ // Maintaining old SDDL-based behavior for backward compatibility. All new
+ // tar headers written by this library will have raw binary for the security
+ // descriptor.
+ if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
+ return winio.SddlToSecurityDescriptor(sddl)
+ }
+ return nil, nil
+}
+
+// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
+// current file from the tar header and returns it as a byte slice.
+func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
+ var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful
+ for k, v := range hdr.PAXRecords {
+ if !strings.HasPrefix(k, hdrEaPrefix) {
+ continue
+ }
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return nil, err
+ }
+ eas = append(eas, winio.ExtendedAttribute{
+ Name: k[len(hdrEaPrefix):],
+ Value: data,
+ })
+ }
+ var eaData []byte
+ var err error
+ if len(eas) != 0 {
+ eaData, err = winio.EncodeExtendedAttributes(eas)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return eaData, nil
+}
+
+// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
+// and encodes it into a byte slice. The file for which this function is called must be a
+// symlink.
+func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
+ _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
+ rp := winio.ReparsePoint{
+ Target: filepath.FromSlash(hdr.Linkname),
+ IsMountPoint: isMountPoint,
+ }
+ return winio.EncodeReparsePoint(&rp)
+}
+
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
-// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
-//
-// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
-//
-// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
+// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
+// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
+// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := BasicInfoHeader(name, size, fileInfo)
@@ -142,7 +206,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
@@ -150,21 +214,21 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
switch bhdr.Id {
case winio.BackupData:
- hdr.Mode |= c_ISREG
+ hdr.Mode |= cISREG
if !readTwice {
dataHdr = bhdr
}
case winio.BackupSecurity:
- sd, err := ioutil.ReadAll(br)
+ sd, err := io.ReadAll(br)
if err != nil {
return err
}
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
- hdr.Mode |= c_ISLNK
+ hdr.Mode |= cISLNK
hdr.Typeflag = tar.TypeSymlink
- reparseBuffer, err := ioutil.ReadAll(br)
+ reparseBuffer, _ := io.ReadAll(br)
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
@@ -175,7 +239,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
hdr.Linkname = rp.Target
case winio.BackupEaData:
- eab, err := ioutil.ReadAll(br)
+ eab, err := io.ReadAll(br)
if err != nil {
return err
}
@@ -209,7 +273,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
for dataHdr == nil {
bhdr, err := br.Next()
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
@@ -221,20 +285,44 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
}
- if dataHdr != nil {
+ // The logic for copying file contents is fairly complicated due to the need for handling sparse files,
+ // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
+ // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
+ // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
+ // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
+ // in the list at the bottom of this block comment.
+ //
+ // Sparse files can be represented in four different ways, based on the specifics of the file.
+ // - Size = 0:
+ // Previously: BackupRead yields no data stream and no sparse block streams.
+ // Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+ // - Size > 0, no allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+ // size = 0 and offset = <file size>.
+ // - Size > 0, one allocated range:
+ // BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+ // sparse block streams. This is the case if you take a normal file with contents and simply set the
+ // sparse flag on it.
+ // - Size > 0, multiple allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+ // range of the file containing the range contents. Finally there is a sparse block stream with
+ // size = 0 and offset = <file size>.
+
+ if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
// A data stream was found. Copy the data.
- if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
+ // We assume that we will either have a data stream size > 0 XOR have sparse block streams.
+ if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
- _, err = io.Copy(t, br)
- if err != nil {
- return err
+ if _, err = io.Copy(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
}
- } else {
- err = copySparse(t, br)
- if err != nil {
- return err
+ } else if size > 0 {
+ // As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+ // These files have no sparse block streams, so skip the copySparse call if file size = 0.
+ if err = copySparse(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
}
}
}
@@ -244,7 +332,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
@@ -252,34 +340,29 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
switch bhdr.Id {
case winio.BackupAlternateData:
- altName := bhdr.Name
- if strings.HasSuffix(altName, ":$DATA") {
- altName = altName[:len(altName)-len(":$DATA")]
- }
- if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
- hdr = &tar.Header{
- Format: hdr.Format,
- Name: name + altName,
- Mode: hdr.Mode,
- Typeflag: tar.TypeReg,
- Size: bhdr.Size,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- }
- err = t.WriteHeader(hdr)
- if err != nil {
- return err
- }
- _, err = io.Copy(t, br)
- if err != nil {
- return err
- }
-
- } else {
+ if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
- return errors.New("tar of sparse alternate data streams is unsupported")
+ return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
+ }
+ altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
+ hdr = &tar.Header{
+ Format: hdr.Format,
+ Name: name + altName,
+ Mode: hdr.Mode,
+ Typeflag: tar.TypeReg,
+ Size: bhdr.Size,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ err = t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(t, br)
+ if err != nil {
+ return err
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
@@ -294,7 +377,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
- if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+ if hdr.Typeflag == tar.TypeReg {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
@@ -312,7 +395,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
fileInfo.FileAttributes = uint32(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
- fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+ fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY
}
}
if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
@@ -322,7 +405,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
}
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
}
- return
+ return name, size, fileInfo, err
}
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
@@ -330,21 +413,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
- var sd []byte
- var err error
- // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
- // by this library will have raw binary for the security descriptor.
- if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
- sd, err = winio.SddlToSecurityDescriptor(sddl)
- if err != nil {
- return nil, err
- }
- }
- if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
- sd, err = base64.StdEncoding.DecodeString(sdraw)
- if err != nil {
- return nil, err
- }
+
+ sd, err := SecurityDescriptorFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
@@ -360,25 +432,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
- var eas []winio.ExtendedAttribute
- for k, v := range hdr.PAXRecords {
- if !strings.HasPrefix(k, hdrEaPrefix) {
- continue
- }
- data, err := base64.StdEncoding.DecodeString(v)
- if err != nil {
- return nil, err
- }
- eas = append(eas, winio.ExtendedAttribute{
- Name: k[len(hdrEaPrefix):],
- Value: data,
- })
+
+ eadata, err := ExtendedAttributesFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
}
- if len(eas) != 0 {
- eadata, err := winio.EncodeExtendedAttributes(eas)
- if err != nil {
- return nil, err
- }
+ if len(eadata) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
@@ -392,13 +451,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
+
if hdr.Typeflag == tar.TypeSymlink {
- _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
- rp := winio.ReparsePoint{
- Target: filepath.FromSlash(hdr.Linkname),
- IsMountPoint: isMountPoint,
- }
- reparse := winio.EncodeReparsePoint(&rp)
+ reparse := EncodeReparsePointFromTarHeader(hdr)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
@@ -412,7 +467,8 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
- if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+
+ if hdr.Typeflag == tar.TypeReg {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
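The zeroReader + io.CopyN pattern introduced above is self-contained enough to sketch in isolation; this is the same trick copySparse uses to pad the gap before each sparse block:

```go
package main

import (
	"io"
	"os"
)

// zeroReader mirrors the helper above: an io.Reader that yields zero bytes forever.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func main() {
	// Pad 16 zero bytes, as copySparse does between allocated ranges.
	if _, err := io.CopyN(os.Stdout, zeroReader{}, 16); err != nil {
		panic(err)
	}
}
```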
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 000000000..1f5bfe2d5
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
index 4051c1b33..e104dbdfd 100644
--- a/vendor/github.com/Microsoft/go-winio/ea.go
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
- return
+ return ea, nb, err
}
nameOffset := fileFullEaInformationSize
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
- return
+ return ea, nb, err
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
- return
+ return ea, nb, err
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
eas = append(eas, ea)
b = nb
}
- return
+ return eas, err
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
index 0385e4108..fe82a180d 100644
--- a/vendor/github.com/Microsoft/go-winio/file.go
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -10,32 +11,16 @@ import (
"sync/atomic"
"syscall"
"time"
-)
-
-//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
-//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
-//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
-//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
-//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
-
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
-func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
-func (b *atomicBool) swap(new bool) bool {
- var newInt int32
- if new {
- newInt = 1
- }
- return atomic.SwapInt32((*int32)(b), newInt) == 1
-}
-const (
- cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
- cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
+ "golang.org/x/sys/windows"
)
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
+
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
@@ -43,29 +28,29 @@ var (
type timeoutError struct{}
-func (e *timeoutError) Error() string { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool { return true }
-func (e *timeoutError) Temporary() bool { return true }
+func (*timeoutError) Error() string { return "i/o timeout" }
+func (*timeoutError) Timeout() bool { return true }
+func (*timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once
-var ioCompletionPort syscall.Handle
+var ioCompletionPort windows.Handle
-// ioResult contains the result of an asynchronous IO operation
+// ioResult contains the result of an asynchronous IO operation.
type ioResult struct {
bytes uint32
err error
}
-// ioOperation represents an outstanding asynchronous Win32 IO
+// ioOperation represents an outstanding asynchronous Win32 IO.
type ioOperation struct {
- o syscall.Overlapped
+ o windows.Overlapped
ch chan ioResult
}
-func initIo() {
- h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+func initIO() {
+ h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
}
@@ -76,10 +61,10 @@ func initIo() {
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
- handle syscall.Handle
+ handle windows.Handle
wg sync.WaitGroup
wgLock sync.RWMutex
- closing atomicBool
+ closing atomic.Bool
socket bool
readDeadline deadlineHandler
writeDeadline deadlineHandler
@@ -90,18 +75,18 @@ type deadlineHandler struct {
channel timeoutChan
channelLock sync.RWMutex
timer *time.Timer
- timedout atomicBool
+ timedout atomic.Bool
}
-// makeWin32File makes a new win32File from an existing file handle
-func makeWin32File(h syscall.Handle) (*win32File, error) {
+// makeWin32File makes a new win32File from an existing file handle.
+func makeWin32File(h windows.Handle) (*win32File, error) {
f := &win32File{handle: h}
- ioInitOnce.Do(initIo)
+ ioInitOnce.Do(initIO)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
- err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
+ err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
@@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
return f, nil
}
+// Deprecated: use NewOpenFile instead.
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+ return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
// If we return the result of makeWin32File directly, it can result in an
// interface-wrapped nil, rather than a nil interface value.
f, err := makeWin32File(h)
@@ -120,17 +110,17 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return f, nil
}
-// closeHandle closes the resources associated with a Win32 handle
+// closeHandle closes the resources associated with a Win32 handle.
func (f *win32File) closeHandle() {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
- if !f.closing.swap(true) {
+ if !f.closing.Swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
- cancelIoEx(f.handle, nil)
+ _ = cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
- syscall.Close(f.handle)
+ windows.Close(f.handle)
f.handle = 0
} else {
f.wgLock.Unlock()
@@ -143,11 +133,16 @@ func (f *win32File) Close() error {
return nil
}
-// prepareIo prepares for a new IO operation.
+// IsClosed checks if the file has been closed.
+func (f *win32File) IsClosed() bool {
+ return f.closing.Load()
+}
+
+// prepareIO prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
-func (f *win32File) prepareIo() (*ioOperation, error) {
+func (f *win32File) prepareIO() (*ioOperation, error) {
f.wgLock.RLock()
- if f.closing.isSet() {
+ if f.closing.Load() {
f.wgLock.RUnlock()
return nil, ErrFileClosed
}
@@ -158,13 +153,13 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
return c, nil
}
-// ioCompletionProcessor processes completed async IOs forever
-func ioCompletionProcessor(h syscall.Handle) {
+// ioCompletionProcessor processes completed async IOs forever.
+func ioCompletionProcessor(h windows.Handle) {
for {
var bytes uint32
var key uintptr
var op *ioOperation
- err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+ err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
if op == nil {
panic(err)
}
@@ -172,15 +167,17 @@ func ioCompletionProcessor(h syscall.Handle) {
}
}
-// asyncIo processes the return value from ReadFile or WriteFile, blocking until
+// todo: helsaawy - create an asyncIO version that takes a context
+
+// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
-func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
- if err != syscall.ERROR_IO_PENDING {
+func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+ if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
return int(bytes), err
}
- if f.closing.isSet() {
- cancelIoEx(f.handle, &c.o)
+ if f.closing.Load() {
+ _ = cancelIoEx(f.handle, &c.o)
}
var timeout timeoutChan
@@ -194,8 +191,8 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
select {
case r = <-c.ch:
err = r.err
- if err == syscall.ERROR_OPERATION_ABORTED {
- if f.closing.isSet() {
+ if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+ if f.closing.Load() {
err = ErrFileClosed
}
} else if err != nil && f.socket {
@@ -204,10 +201,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
- cancelIoEx(f.handle, &c.o)
+ _ = cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
- if err == syscall.ERROR_OPERATION_ABORTED {
+ if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
err = ErrTimeout
}
}
@@ -215,52 +212,52 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
// runtime.KeepAlive is needed, as c is passed via native
// code to ioCompletionProcessor, c must remain alive
// until the channel read is complete.
+ // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
runtime.KeepAlive(c)
return int(r.bytes), err
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
- c, err := f.prepareIo()
+ c, err := f.prepareIO()
if err != nil {
return 0, err
}
defer f.wg.Done()
- if f.readDeadline.timedout.isSet() {
+ if f.readDeadline.timedout.Load() {
return 0, ErrTimeout
}
var bytes uint32
- err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
- n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
+ err = windows.ReadFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
- } else if err == syscall.ERROR_BROKEN_PIPE {
+ } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
return 0, io.EOF
- } else {
- return n, err
}
+ return n, err
}
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
- c, err := f.prepareIo()
+ c, err := f.prepareIO()
if err != nil {
return 0, err
}
defer f.wg.Done()
- if f.writeDeadline.timedout.isSet() {
+ if f.writeDeadline.timedout.Load() {
return 0, ErrTimeout
}
var bytes uint32
- err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
- n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
+ err = windows.WriteFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
}
@@ -274,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error {
}
func (f *win32File) Flush() error {
- return syscall.FlushFileBuffers(f.handle)
+ return windows.FlushFileBuffers(f.handle)
}
func (f *win32File) Fd() uintptr {
@@ -291,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
}
d.timer = nil
}
- d.timedout.setFalse()
+ d.timedout.Store(false)
select {
case <-d.channel:
@@ -306,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
}
timeoutIO := func() {
- d.timedout.setTrue()
+ d.timedout.Store(true)
close(d.channel)
}
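
The file.go hunks retire the hand-rolled int32-based atomicBool in favor of the standard atomic.Bool, which sync/atomic gained in Go 1.19 (consistent with the go 1.21 bump in this module's go.mod below). A small sketch of the one-to-one mapping, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var closing atomic.Bool // replaces the removed atomicBool

	// Swap returns the previous value, exactly like the old swap(true):
	// only the first closer observes false and runs the cleanup path,
	// however many goroutines race here.
	if !closing.Swap(true) {
		fmt.Println("first closer wins")
	}

	fmt.Println(closing.Load()) // true; replaces isSet()
	closing.Store(false)        // replaces setFalse()
}
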
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
index 3ab6bff69..c860eb991 100644
--- a/vendor/github.com/Microsoft/go-winio/fileinfo.go
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -14,22 +15,46 @@ import (
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
FileAttributes uint32
- pad uint32 // padding
+ _ uint32 // padding
+}
+
+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+ FileAttributes uint32
+ _ uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
- bi := &FileBasicInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ bi := &alignedFileBasicInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
- return bi, nil
+ // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+ // public API of this module. The data may be unnecessarily aligned.
+ return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
}
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
- if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+ // suitable to pass to SetFileInformationByHandle.
+ biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
+ if err := windows.SetFileInformationByHandle(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(&biAligned)),
+ uint32(unsafe.Sizeof(biAligned)),
+ ); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -48,7 +73,10 @@ type FileStandardInfo struct {
// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+ windows.FileStandardInfo,
+ (*byte)(unsafe.Pointer(si)),
+ uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -65,7 +93,12 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileIdInfo,
+ (*byte)(unsafe.Pointer(fileID)),
+ uint32(unsafe.Sizeof(*fileID)),
+ ); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
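
The alignedFileBasicInfo indirection above exists because windows.Filetime is two uint32s, so a struct built from Filetimes only guarantees 4-byte alignment, while the kernel's FILE_BASIC_INFO expects 8-byte (LARGE_INTEGER) alignment. A standalone check of that reasoning, with stand-in types that mirror the patch rather than the real windows package:

package main

import (
	"fmt"
	"unsafe"
)

type filetime struct{ Low, High uint32 } // stand-in for windows.Filetime

type basicInfo struct { // mirrors FileBasicInfo
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime filetime
	FileAttributes                                          uint32
	_                                                       uint32
}

type alignedBasicInfo struct { // mirrors alignedFileBasicInfo
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
	FileAttributes                                          uint32
	_                                                       uint32
}

func main() {
	fmt.Println(unsafe.Alignof(basicInfo{}))        // 4
	fmt.Println(unsafe.Alignof(alignedBasicInfo{})) // 8 on 64-bit targets
	// Identical size and layout is what makes the unsafe.Pointer
	// reinterpretation in GetFileBasicInfo/SetFileBasicInfo valid.
	fmt.Println(unsafe.Sizeof(basicInfo{}) == unsafe.Sizeof(alignedBasicInfo{})) // true
}
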
diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod
index 98a8dea0e..add720ff8 100644
--- a/vendor/github.com/Microsoft/go-winio/go.mod
+++ b/vendor/github.com/Microsoft/go-winio/go.mod
@@ -1,9 +1,11 @@
module github.com/Microsoft/go-winio
-go 1.12
+go 1.21
require (
- github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.7.0
- golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
+ github.com/sirupsen/logrus v1.9.3
+ golang.org/x/sys v0.10.0
+ golang.org/x/tools v0.11.0
)
+
+require golang.org/x/mod v0.12.0 // indirect
diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum
index aa6ad3b57..11aef9910 100644
--- a/vendor/github.com/Microsoft/go-winio/go.sum
+++ b/vendor/github.com/Microsoft/go-winio/go.sum
@@ -1,14 +1,22 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
+golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
index b632f8f8b..c4fdd9d4a 100644
--- a/vendor/github.com/Microsoft/go-winio/hvsock.go
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -1,26 +1,99 @@
+//go:build windows
// +build windows
package winio
import (
+ "context"
+ "errors"
"fmt"
"io"
"net"
"os"
- "syscall"
"time"
"unsafe"
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/socket"
"github.com/Microsoft/go-winio/pkg/guid"
)
-//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+const afHVSock = 34 // AF_HYPERV
-const (
- afHvSock = 34 // AF_HYPERV
+// Well-known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
- socketError = ^uintptr(0)
-)
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
+ return guid.GUID{}
+}
+
+// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
+func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
+ return guid.GUID{
+ Data1: 0xffffffff,
+ Data2: 0xffff,
+ Data3: 0xffff,
+ Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ }
+}
+
+// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
+func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
+ return guid.GUID{
+ Data1: 0xe0e16197,
+ Data2: 0xdd56,
+ Data3: 0x4a10,
+ Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
+ }
+}
+
+// HvsockGUIDSiloHost is the address of a silo's host partition:
+// - The silo host of a hosted silo is the utility VM.
+// - The silo host of a silo on a physical host is the physical host.
+func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
+ return guid.GUID{
+ Data1: 0x36bd0c5c,
+ Data2: 0x7276,
+ Data3: 0x4223,
+ Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
+ }
+}
+
+// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
+func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
+ return guid.GUID{
+ Data1: 0x90db8b89,
+ Data2: 0xd35,
+ Data3: 0x4f79,
+ Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
+ }
+}
+
+// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
+// Listening on this VmId accepts connection from:
+// - Inside silos: silo host partition.
+// - Inside hosted silo: host of the VM.
+// - Inside VM: VM host.
+// - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+ return guid.GUID{
+ Data1: 0xa42e7cda,
+ Data2: 0xd03f,
+ Data3: 0x480c,
+ Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+ }
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+ return guid.GUID{
+ Data2: 0xfacb,
+ Data3: 0x11e6,
+ Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+ }
+}
// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
@@ -35,8 +108,10 @@ type rawHvsockAddr struct {
ServiceID guid.GUID
}
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
// Network returns the address's network name, "hvsock".
-func (addr *HvsockAddr) Network() string {
+func (*HvsockAddr) Network() string {
return "hvsock"
}
@@ -46,14 +121,14 @@ func (addr *HvsockAddr) String() string {
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
- g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
+ g := hvsockVsockServiceTemplate() // make a copy
g.Data1 = port
return g
}
func (addr *HvsockAddr) raw() rawHvsockAddr {
return rawHvsockAddr{
- Family: afHvSock,
+ Family: afHVSock,
VMID: addr.VMID,
ServiceID: addr.ServiceID,
}
@@ -64,26 +139,54 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
addr.ServiceID = raw.ServiceID
}
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+ return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes populates the rawHvsockAddr from the raw sockaddr bytes in b, validating the address family.
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+ n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+ if len(b) < n {
+ return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+ }
+
+ copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+ if r.Family != afHVSock {
+ return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+ }
+
+ return nil
+}
+
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
sock *win32File
addr HvsockAddr
}
+var _ net.Listener = &HvsockListener{}
+
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
sock *win32File
local, remote HvsockAddr
}
-func newHvSocket() (*win32File, error) {
- fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+ fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
f, err := makeWin32File(fd)
if err != nil {
- syscall.Close(fd)
+ windows.Close(fd)
return nil, err
}
f.socket = true
@@ -93,16 +196,24 @@ func newHvSocket() (*win32File, error) {
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
l := &HvsockListener{addr: *addr}
- sock, err := newHvSocket()
+
+ var sock *win32File
+ sock, err = newHVSocket()
if err != nil {
return nil, l.opErr("listen", err)
}
+ defer func() {
+ if err != nil {
+ _ = sock.Close()
+ }
+ }()
+
sa := addr.raw()
- err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
+ err = socket.Bind(sock.handle, &sa)
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
}
- err = syscall.Listen(sock.handle, 16)
+ err = windows.Listen(sock.handle, 16)
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("listen", err))
}
@@ -120,7 +231,7 @@ func (l *HvsockListener) Addr() net.Addr {
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
- sock, err := newHvSocket()
+ sock, err := newHVSocket()
if err != nil {
return nil, l.opErr("accept", err)
}
@@ -129,27 +240,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock.Close()
}
}()
- c, err := l.sock.prepareIo()
+ c, err := l.sock.prepareIO()
if err != nil {
return nil, l.opErr("accept", err)
}
defer l.sock.wg.Done()
// AcceptEx, per documentation, requires an extra 16 bytes per address.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
var addrbuf [addrlen * 2]byte
var bytes uint32
- err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
- _, err = l.sock.asyncIo(c, nil, bytes, err)
- if err != nil {
+ err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+ if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}
+
conn := &HvsockConn{
sock: sock,
}
+ // The local address returned in the AcceptEx buffer is the same as the Listener socket's
+ // address. However, the service GUID reported by GetSockName is different from the Listener's
+ // socket, and is sometimes the same as the local address of the socket that dialed the
+ // address, with the service GUID.Data1 incremented, but other times it is different.
+ // todo: does the local address matter? is the listener's address or the actual address appropriate?
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+ // initialize the accepted socket and update its properties with those of the listening socket
+ if err = windows.Setsockopt(sock.handle,
+ windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+ (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+ return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+ }
+
sock = nil
return conn, nil
}
@@ -159,54 +285,183 @@ func (l *HvsockListener) Close() error {
return l.sock.Close()
}
-/* Need to finish ConnectEx handling
-func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
- sock, err := newHvSocket()
+// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
+type HvsockDialer struct {
+ // Deadline is the time the Dial operation must connect before erroring.
+ Deadline time.Time
+
+ // Retries is the number of additional connects to try if the connection times out, is refused,
+ // or the host is unreachable
+ Retries uint
+
+ // RetryWait is the time to wait after a connection error to retry
+ RetryWait time.Duration
+
+ rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ return (&HvsockDialer{}).Dial(ctx, addr)
+}
+
+// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
+// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
+// retries.
+//
+// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ op := "dial"
+ // create the conn early to use opErr()
+ conn = &HvsockConn{
+ remote: *addr,
+ }
+
+ if !d.Deadline.IsZero() {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithDeadline(ctx, d.Deadline)
+ defer cancel()
+ }
+
+ // preemptive timeout/cancellation check
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
+ }
+
+ sock, err := newHVSocket()
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, err)
}
defer func() {
if sock != nil {
sock.Close()
}
}()
- c, err := sock.prepareIo()
+
+ sa := addr.raw()
+ err = socket.Bind(sock.handle, &sa)
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, os.NewSyscallError("bind", err))
+ }
+
+ c, err := sock.prepareIO()
+ if err != nil {
+ return nil, conn.opErr(op, err)
}
defer sock.wg.Done()
var bytes uint32
- err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
- _, err = sock.asyncIo(ctx, c, nil, bytes, err)
+ for i := uint(0); i <= d.Retries; i++ {
+ err = socket.ConnectEx(
+ sock.handle,
+ &sa,
+ nil, // sendBuf
+ 0, // sendDataLen
+ &bytes,
+ (*windows.Overlapped)(unsafe.Pointer(&c.o)))
+ _, err = sock.asyncIO(c, nil, bytes, err)
+ if i < d.Retries && canRedial(err) {
+ if err = d.redialWait(ctx); err == nil {
+ continue
+ }
+ }
+ break
+ }
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
}
- conn := &HvsockConn{
- sock: sock,
- remote: *addr,
+
+ // update the connection properties, so shutdown can be used
+ if err = windows.Setsockopt(
+ sock.handle,
+ windows.SOL_SOCKET,
+ windows.SO_UPDATE_CONNECT_CONTEXT,
+ nil, // optvalue
+ 0, // optlen
+ ); err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
+ }
+
+ // get the local name
+ var sal rawHvsockAddr
+ err = socket.GetSockName(sock.handle, &sal)
+ if err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
}
+ conn.local.fromRaw(&sal)
+
+ // one last check for timeout, since asyncIO doesn't check the context
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
+ }
+
+ conn.sock = sock
sock = nil
+
return conn, nil
}
-*/
+
+// redialWait waits before attempting to redial, resetting the timer as appropriate.
+func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
+ if d.RetryWait == 0 {
+ return nil
+ }
+
+ if d.rt == nil {
+ d.rt = time.NewTimer(d.RetryWait)
+ } else {
+ // should already be stopped and drained
+ d.rt.Reset(d.RetryWait)
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-d.rt.C:
+ return nil
+ }
+
+ // stop and drain the timer
+ if !d.rt.Stop() {
+ <-d.rt.C
+ }
+ return ctx.Err()
+}
+
+// assumes error is a plain, unwrapped windows.Errno provided by direct syscall.
+func canRedial(err error) bool {
+ //nolint:errorlint // guaranteed to be an Errno
+ switch err {
+ case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
+ windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
+ return true
+ default:
+ return false
+ }
+}
func (conn *HvsockConn) opErr(op string, err error) error {
+ // translate from "file closed" to "socket closed"
+ if errors.Is(err, ErrFileClosed) {
+ err = socket.ErrSocketClosed
+ }
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}
func (conn *HvsockConn) Read(b []byte) (int, error) {
- c, err := conn.sock.prepareIo()
+ c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("read", err)
}
defer conn.sock.wg.Done()
- buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+ buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var flags, bytes uint32
- err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
- n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
+ err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+ n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
if err != nil {
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsarecv", err)
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsarecv", eno)
}
return 0, conn.opErr("read", err)
} else if n == 0 {
@@ -229,18 +484,19 @@ func (conn *HvsockConn) Write(b []byte) (int, error) {
}
func (conn *HvsockConn) write(b []byte) (int, error) {
- c, err := conn.sock.prepareIo()
+ c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("write", err)
}
defer conn.sock.wg.Done()
- buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+ buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var bytes uint32
- err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
- n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
+ err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+ n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
if err != nil {
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsasend", err)
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsasend", eno)
}
return 0, conn.opErr("write", err)
}
@@ -252,29 +508,43 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}
+func (conn *HvsockConn) IsClosed() bool {
+ return conn.sock.IsClosed()
+}
+
+// shutdown disables sending or receiving on a socket.
func (conn *HvsockConn) shutdown(how int) error {
- err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
+ if conn.IsClosed() {
+ return socket.ErrSocketClosed
+ }
+
+ err := windows.Shutdown(conn.sock.handle, how)
if err != nil {
+ // If the connection was closed, shutdowns fail with "not connected"
+ if errors.Is(err, windows.WSAENOTCONN) ||
+ errors.Is(err, windows.WSAESHUTDOWN) {
+ err = socket.ErrSocketClosed
+ }
return os.NewSyscallError("shutdown", err)
}
return nil
}
-// CloseRead shuts down the read end of the socket.
+// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
- err := conn.shutdown(syscall.SHUT_RD)
+ err := conn.shutdown(windows.SHUT_RD)
if err != nil {
- return conn.opErr("close", err)
+ return conn.opErr("closeread", err)
}
return nil
}
-// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
-// no more data will be written.
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
- err := conn.shutdown(syscall.SHUT_WR)
+ err := conn.shutdown(windows.SHUT_WR)
if err != nil {
- return conn.opErr("close", err)
+ return conn.opErr("closewrite", err)
}
return nil
}
@@ -291,8 +561,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr {
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
- conn.SetReadDeadline(t)
- conn.SetWriteDeadline(t)
+ // todo: implement `SetDeadline` for `win32File`
+ if err := conn.SetReadDeadline(t); err != nil {
+ return fmt.Errorf("set read deadline: %w", err)
+ }
+ if err := conn.SetWriteDeadline(t); err != nil {
+ return fmt.Errorf("set write deadline: %w", err)
+ }
return nil
}
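
For reference, this is roughly how a consumer would use the Dial/HvsockDialer API introduced above. The VM ID and port are placeholders, and the sketch compiles only on Windows:

//go:build windows

package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	vmID, err := guid.FromString("a42e7cda-d03f-480c-9cc2-a4de20abb878") // placeholder VM ID
	if err != nil {
		panic(err)
	}
	// VsockServiceID maps an AF_VSOCK-style port onto the service GUID template.
	addr := &winio.HvsockAddr{VMID: vmID, ServiceID: winio.VsockServiceID(5000)}

	d := &winio.HvsockDialer{Retries: 3, RetryWait: time.Second}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := d.Dial(ctx, addr) // retries on refused/unreachable/timeout, per canRedial
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected:", conn.LocalAddr(), "->", conn.RemoteAddr())
}
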
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
new file mode 100644
index 000000000..1f6538817
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
@@ -0,0 +1,2 @@
+// This package contains Win32 filesystem functionality.
+package fs
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
new file mode 100644
index 000000000..0cd9621df
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
@@ -0,0 +1,262 @@
+//go:build windows
+
+package fs
+
+import (
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/stringbuffer"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
+
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+
+const NullHandle windows.Handle = 0
+
+// AccessMask defines standard, specific, and generic rights.
+//
+// Used with CreateFile and NtCreateFile (and co.).
+//
+// Bitmask:
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---------------+---------------+-------------------------------+
+// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
+// |R|W|E|A| |S| | |
+// +-+-------------+---------------+-------------------------------+
+//
+// GR Generic Read
+// GW Generic Write
+// GE Generic Execute
+// GA Generic All
+// Resvd Reserved
+// AS Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // Not actually any.
+ //
+ // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+ FILE_ANY_ACCESS AccessMask = 0
+
+ GENERIC_READ AccessMask = 0x8000_0000
+ GENERIC_WRITE AccessMask = 0x4000_0000
+ GENERIC_EXECUTE AccessMask = 0x2000_0000
+ GENERIC_ALL AccessMask = 0x1000_0000
+ ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
+ // Specific Object Access
+ // from ntioapi.h
+
+ FILE_READ_DATA AccessMask = (0x0001) // file & pipe
+ FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+ FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+ FILE_ADD_FILE AccessMask = (0x0002) // directory
+
+ FILE_APPEND_DATA AccessMask = (0x0004) // file
+ FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
+ FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+ FILE_READ_EA AccessMask = (0x0008) // file & directory
+ FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+ FILE_WRITE_EA AccessMask = (0x0010) // file & directory
+ FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+ FILE_EXECUTE AccessMask = (0x0020) // file
+ FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+ FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+ FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+ FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+ FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+ FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+ FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+ FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+ SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+ // Standard Access
+ // from ntseapi.h
+
+ DELETE AccessMask = 0x0001_0000
+ READ_CONTROL AccessMask = 0x0002_0000
+ WRITE_DAC AccessMask = 0x0004_0000
+ WRITE_OWNER AccessMask = 0x0008_0000
+ SYNCHRONIZE AccessMask = 0x0010_0000
+
+ STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+ STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+ STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ FILE_SHARE_NONE FileShareMode = 0x00
+ FILE_SHARE_READ FileShareMode = 0x01
+ FILE_SHARE_WRITE FileShareMode = 0x02
+ FILE_SHARE_DELETE FileShareMode = 0x04
+ FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
+)
+
+type FileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winbase.h
+
+ CREATE_NEW FileCreationDisposition = 0x01
+ CREATE_ALWAYS FileCreationDisposition = 0x02
+ OPEN_EXISTING FileCreationDisposition = 0x03
+ OPEN_ALWAYS FileCreationDisposition = 0x04
+ TRUNCATE_EXISTING FileCreationDisposition = 0x05
+)
+
+// Create disposition values for NtCreate*
+type NTFileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // From ntioapi.h
+
+ FILE_SUPERSEDE NTFileCreationDisposition = 0x00
+ FILE_OPEN NTFileCreationDisposition = 0x01
+ FILE_CREATE NTFileCreationDisposition = 0x02
+ FILE_OPEN_IF NTFileCreationDisposition = 0x03
+ FILE_OVERWRITE NTFileCreationDisposition = 0x04
+ FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05
+ FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
+)
+
+// CreateFile and co. take flags or attributes together as one parameter.
+// Define alias until we can use generics to allow both
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
+type FileFlagOrAttribute uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winnt.h
+
+ FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000
+ FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000
+ FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000
+ FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000
+ FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000
+ FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000
+ FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000
+ FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000
+ FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000
+ FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000
+ FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
+)
+
+// NtCreate* functions take a dedicated CreateOptions parameter.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
+//
+// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
+type NTCreateOptions uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // From ntioapi.h
+
+ FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001
+ FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002
+ FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004
+ FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008
+
+ FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010
+ FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
+ FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040
+ FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080
+
+ FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
+ FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200
+ FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400
+ FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800
+
+ FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000
+ FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000
+ FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
+ FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000
+)
+
+type FileSQSFlag = FileFlagOrAttribute
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winbase.h
+
+ SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
+ SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
+ SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
+ SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
+
+ SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000
+ SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
+)
+
+// GetFinalPathNameByHandle flags
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
+type GetFinalPathFlag uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
+
+ FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
+ FILE_NAME_OPENED GetFinalPathFlag = 0x8
+
+ VOLUME_NAME_DOS GetFinalPathFlag = 0x0
+ VOLUME_NAME_GUID GetFinalPathFlag = 0x1
+ VOLUME_NAME_NT GetFinalPathFlag = 0x2
+ VOLUME_NAME_NONE GetFinalPathFlag = 0x4
+)
+
+// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
+// with the given handle and flags. It transparently takes care of creating a buffer of the
+// correct size for the call.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
+func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
+ b := stringbuffer.NewWString()
+ //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
+ for {
+ n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
+ if err != nil {
+ return "", err
+ }
+ // If the buffer wasn't large enough, n will be the total size needed (including null terminator).
+ // Resize and try again.
+ if n > b.Cap() {
+ b.ResizeTo(n)
+ continue
+ }
+ // If the buffer is large enough, n will be the size not including the null terminator.
+ // Convert to a Go string and return.
+ return b.String(), nil
+ }
+}
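
internal/fs and internal/stringbuffer cannot be imported from outside this module, but the grow-and-retry pattern in GetFinalPathNameByHandle above can be reproduced against x/sys/windows directly. A Windows-only sketch with a hypothetical finalPath helper:

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// finalPath reproduces the loop above: call, and if the reported size
// exceeds the buffer, grow to exactly that size and retry.
func finalPath(f *os.File) (string, error) {
	buf := make([]uint16, 310) // same starting capacity as MinWStringCap
	for {
		n, err := windows.GetFinalPathNameByHandle(windows.Handle(f.Fd()), &buf[0], uint32(len(buf)), 0)
		if err != nil {
			return "", err
		}
		if int(n) > len(buf) { // n includes the null terminator only when the buffer was too small
			buf = make([]uint16, n)
			continue
		}
		return windows.UTF16ToString(buf[:n]), nil
	}
}

func main() {
	f, err := os.Open(`C:\Windows`)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	p, err := finalPath(f)
	fmt.Println(p, err) // e.g. \\?\C:\Windows <nil>
}
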
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
new file mode 100644
index 000000000..81760ac67
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
@@ -0,0 +1,12 @@
+package fs
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
+type SecurityImpersonationLevel int32 // the default underlying type of a C enum is `int`, which is Go `int32`
+
+// Impersonation levels
+const (
+ SecurityAnonymous SecurityImpersonationLevel = 0
+ SecurityIdentification SecurityImpersonationLevel = 1
+ SecurityImpersonation SecurityImpersonationLevel = 2
+ SecurityDelegation SecurityImpersonationLevel = 3
+)
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
new file mode 100644
index 000000000..a94e234c7
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
@@ -0,0 +1,61 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package fs
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procCreateFileW = modkernel32.NewProc("CreateFileW")
+)
+
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
new file mode 100644
index 000000000..7e82f9afa
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
@@ -0,0 +1,20 @@
+package socket
+
+import (
+ "unsafe"
+)
+
+// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct must be at least as large as an int16 (unsigned short),
+// which holds the address family.
+type RawSockaddr interface {
+ // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+ // for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+ //
+ // It is the caller's responsibility to validate that the values are valid; invalid
+ // pointers or sizes can cause a panic.
+ Sockaddr() (unsafe.Pointer, int32, error)
+}
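
rawHvsockAddr earlier in this diff is the real implementer of this interface. As a shape-only illustration (the socket package is internal, so this hypothetical type merely mirrors the contract):

package main

import (
	"fmt"
	"unsafe"
)

// fakeSockaddr satisfies the RawSockaddr contract: the uint16 address
// family comes first, followed by family-specific payload.
type fakeSockaddr struct {
	Family uint16
	Data   [14]byte
}

// Sockaddr hands the syscall layer a pointer into the struct plus its size,
// so calls like getsockname can overwrite it in place.
func (r *fakeSockaddr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}

func main() {
	var sa fakeSockaddr
	ptr, n, _ := sa.Sockaddr()
	fmt.Println(ptr != nil, n) // true 16
}
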
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 000000000..88580d974
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,177 @@
+//go:build windows
+
+package socket
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+ // todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+ ErrBufferSize = errors.New("buffer size")
+ ErrAddrFamily = errors.New("address family")
+ ErrInvalidPointer = errors.New("invalid pointer")
+ ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ // although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+ // &l to the correct size, so, apart from doubling the buffer repeatedly, there is no remedy
+ return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return getpeername(s, ptr, &l)
+}
+
+func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return bind(s, ptr, l)
+}
+
+// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
+// their sockaddr interface, so they cannot be used with HvsockAddr
+// Replicate functionality here from
+// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
+
+// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
+// runtime via a WSAIoctl call:
+// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
+
+type runtimeFunc struct {
+ id guid.GUID
+ once sync.Once
+ addr uintptr
+ err error
+}
+
+func (f *runtimeFunc) Load() error {
+ f.once.Do(func() {
+ var s windows.Handle
+ s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+ if f.err != nil {
+ return
+ }
+ defer windows.CloseHandle(s) //nolint:errcheck
+
+ var n uint32
+ f.err = windows.WSAIoctl(s,
+ windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (*byte)(unsafe.Pointer(&f.id)),
+ uint32(unsafe.Sizeof(f.id)),
+ (*byte)(unsafe.Pointer(&f.addr)),
+ uint32(unsafe.Sizeof(f.addr)),
+ &n,
+ nil, // overlapped
+ 0, // completionRoutine
+ )
+ })
+ return f.err
+}
+
+var (
+ // todo: add `AcceptEx` and `GetAcceptExSockaddrs`
+ WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
+ Data1: 0x25a207b9,
+ Data2: 0xddf3,
+ Data3: 0x4660,
+ Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+ }
+
+ connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
+)
+
+func ConnectEx(
+ fd windows.Handle,
+ rsa RawSockaddr,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) error {
+ if err := connectExFunc.Load(); err != nil {
+ return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
+ }
+ ptr, n, err := rsa.Sockaddr()
+ if err != nil {
+ return err
+ }
+ return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
+
+// BOOL LpfnConnectex(
+// [in] SOCKET s,
+// [in] const sockaddr *name,
+// [in] int namelen,
+// [in, optional] PVOID lpSendBuffer,
+// [in] DWORD dwSendDataLength,
+// [out] LPDWORD lpdwBytesSent,
+// [in] LPOVERLAPPED lpOverlapped
+// )
+
+func connectEx(
+ s windows.Handle,
+ name unsafe.Pointer,
+ namelen int32,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) (err error) {
+ r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
+ uintptr(s),
+ uintptr(name),
+ uintptr(namelen),
+ uintptr(unsafe.Pointer(sendBuf)),
+ uintptr(sendDataLen),
+ uintptr(unsafe.Pointer(bytesSent)),
+ uintptr(unsafe.Pointer(overlapped)),
+ )
+
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
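
The runtimeFunc type above caches both the resolved function pointer and any load error behind a sync.Once, so a failed WSAIoctl lookup is reported on every subsequent ConnectEx call without being retried. A distilled, platform-neutral sketch of that pattern (names hypothetical):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type lazyProc struct {
	once sync.Once
	addr uintptr
	err  error
}

func (p *lazyProc) Load() error {
	p.once.Do(func() {
		// expensive one-time lookup; a failure is remembered and
		// returned on every later call, exactly like runtimeFunc.Load
		p.addr, p.err = lookup()
	})
	return p.err
}

func lookup() (uintptr, error) { return 0, errors.New("not found") }

func main() {
	var p lazyProc
	fmt.Println(p.Load()) // "not found"
	fmt.Println(p.Load()) // same cached error; lookup is not rerun
}
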
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
new file mode 100644
index 000000000..e1504126a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
@@ -0,0 +1,69 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package socket
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+
+ procbind = modws2_32.NewProc("bind")
+ procgetpeername = modws2_32.NewProc("getpeername")
+ procgetsockname = modws2_32.NewProc("getsockname")
+)
+
+func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
new file mode 100644
index 000000000..42ebc019f
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
@@ -0,0 +1,132 @@
+package stringbuffer
+
+import (
+ "sync"
+ "unicode/utf16"
+)
+
+// TODO: worth exporting and using in mkwinsyscall?
+
+// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
+// large path strings:
+// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
+const MinWStringCap = 310
+
+// use *[]uint16 since []uint16 creates an extra allocation where the slice header
+// is copied to heap and then referenced via pointer in the interface header that sync.Pool
+// stores.
+var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
+ New: func() interface{} {
+ b := make([]uint16, MinWStringCap)
+ return &b
+ },
+}
+
+func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
+
+// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
+// This avoids taking a pointer to the slice header in WString, which can be set to nil.
+func freeBuffer(b []uint16) { pathPool.Put(&b) }
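The pointer-to-slice trick used by this pool, shown in isolation as a hedged sketch (the names are hypothetical, not the vendored code):

```go
package main

import (
	"fmt"
	"sync"
)

// Storing *[]byte rather than []byte means Put does not copy a fresh
// slice header to the heap on every call; only the pointer is boxed
// into the interface value that sync.Pool holds.
var bufPool = sync.Pool{
	New: func() interface{} { b := make([]byte, 512); return &b },
}

func borrow() []byte    { return *(bufPool.Get().(*[]byte)) }
func giveBack(b []byte) { bufPool.Put(&b) }

func main() {
	b := borrow()
	fmt.Println(len(b)) // 512
	giveBack(b)
}
```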
+
+// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
+// for interacting with Win32 APIs.
+// Sizes are specified as uint32 and not int.
+//
+// It is not thread safe.
+type WString struct {
+ // A type definition (type WString []uint16) would allow casting to []uint16 directly; use a struct to prevent that and to allow adding fields in the future.
+
+ // raw buffer
+ b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free].
+func NewWString() *WString {
+ return &WString{
+ b: newBuffer(),
+ }
+}
+
+func (b *WString) Free() {
+ if b.empty() {
+ return
+ }
+ freeBuffer(b.b)
+ b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into the pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+ // already sufficient (or c is 0)
+ if c <= b.Cap() {
+ return b.Cap()
+ }
+
+ if c <= MinWStringCap {
+ c = MinWStringCap
+ }
+ // allocate at least double the current buffer size, as [bytes.Buffer] and others do
+ if c <= 2*b.Cap() {
+ c = 2 * b.Cap()
+ }
+
+ b2 := make([]uint16, c)
+ if !b.empty() {
+ copy(b2, b.b)
+ freeBuffer(b.b)
+ }
+ b.b = b2
+ return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+ if b.empty() {
+ return nil
+ }
+ return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+ if b.empty() {
+ return nil
+ }
+ return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+ // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows",
+ // which would make this package Windows-only for no good reason, so the
+ // UTF16ToString logic is copied here instead.
+ // If other Windows-specific code is added, switch to [windows.UTF16ToString].
+
+ s := b.b
+ for i, v := range s {
+ if v == 0 {
+ s = s[:i]
+ break
+ }
+ }
+ return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 {
+ if b.empty() {
+ return 0
+ }
+ return b.cap()
+}
+
+func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
+func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
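Taken together, the WString API is meant for grow-and-retry loops against Win32 calls that report a required buffer size. A sketch of that usage (stringbuffer is an internal package, so this is illustrative only; windows.GetCurrentDirectory stands in for any such API):

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/internal/stringbuffer" // internal: shown for illustration only
	"golang.org/x/sys/windows"
)

func currentDir() (string, error) {
	b := stringbuffer.NewWString()
	defer b.Free()
	for {
		// On success n is the length written (without the terminator);
		// if the buffer was too small, n is the required size including
		// the terminator and thus exceeds the current capacity.
		n, err := windows.GetCurrentDirectory(b.Cap(), b.Pointer())
		if err != nil {
			return "", err
		}
		if n <= b.Cap() {
			return b.String(), nil
		}
		b.ResizeTo(n) // grow (at least doubling) and retry
	}
}

func main() {
	d, err := currentDir()
	fmt.Println(d, err)
}
```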
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
index 96700a73d..a2da6639d 100644
--- a/vendor/github.com/Microsoft/go-winio/pipe.go
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -10,26 +11,52 @@ import (
"net"
"os"
"runtime"
- "syscall"
"time"
"unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/fs"
)
-//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
-//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
-//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
-//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
-//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
-//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
-//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
-//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
-//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
-//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
+//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW
+//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
+//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
+//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
+
+type PipeConn interface {
+ net.Conn
+ Disconnect() error
+ Flush() error
+}
+
+// type aliases for mkwinsyscall code
+type (
+ ntAccessMask = fs.AccessMask
+ ntFileShareMode = fs.FileShareMode
+ ntFileCreationDisposition = fs.NTFileCreationDisposition
+ ntFileOptions = fs.NTCreateOptions
+)
type ioStatusBlock struct {
Status, Information uintptr
}
+// typedef struct _OBJECT_ATTRIBUTES {
+// ULONG Length;
+// HANDLE RootDirectory;
+// PUNICODE_STRING ObjectName;
+// ULONG Attributes;
+// PVOID SecurityDescriptor;
+// PVOID SecurityQualityOfService;
+// } OBJECT_ATTRIBUTES;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
type objectAttributes struct {
Length uintptr
RootDirectory uintptr
@@ -45,51 +72,39 @@ type unicodeString struct {
Buffer uintptr
}
+// typedef struct _SECURITY_DESCRIPTOR {
+// BYTE Revision;
+// BYTE Sbz1;
+// SECURITY_DESCRIPTOR_CONTROL Control;
+// PSID Owner;
+// PSID Group;
+// PACL Sacl;
+// PACL Dacl;
+// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
type securityDescriptor struct {
Revision byte
Sbz1 byte
Control uint16
Owner uintptr
Group uintptr
- Sacl uintptr
- Dacl uintptr
+ Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
+ Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
}
-type ntstatus int32
+type ntStatus int32
-func (status ntstatus) Err() error {
+func (status ntStatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
-const (
- cERROR_PIPE_BUSY = syscall.Errno(231)
- cERROR_NO_DATA = syscall.Errno(232)
- cERROR_PIPE_CONNECTED = syscall.Errno(535)
- cERROR_SEM_TIMEOUT = syscall.Errno(121)
-
- cSECURITY_SQOS_PRESENT = 0x100000
- cSECURITY_ANONYMOUS = 0
-
- cPIPE_TYPE_MESSAGE = 4
-
- cPIPE_READMODE_MESSAGE = 2
-
- cFILE_OPEN = 1
- cFILE_CREATE = 2
-
- cFILE_PIPE_MESSAGE_TYPE = 1
- cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
-
- cSE_DACL_PRESENT = 4
-)
-
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
- // This error should match net.errClosing since docker takes a dependency on its text.
- ErrPipeListenerClosed = errors.New("use of closed network connection")
+ ErrPipeListenerClosed = net.ErrClosed
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
@@ -99,6 +114,8 @@ type win32Pipe struct {
path string
}
+var _ PipeConn = (*win32Pipe)(nil)
+
type win32MessageBytePipe struct {
win32Pipe
writeClosed bool
@@ -116,9 +133,14 @@ func (f *win32Pipe) RemoteAddr() net.Addr {
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
- f.SetReadDeadline(t)
- f.SetWriteDeadline(t)
- return nil
+ if err := f.SetReadDeadline(t); err != nil {
+ return err
+ }
+ return f.SetWriteDeadline(t)
+}
+
+func (f *win32Pipe) Disconnect() error {
+ return disconnectNamedPipe(f.win32File.handle)
}
// CloseWrite closes the write side of a message pipe in byte mode.
@@ -157,14 +179,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
- } else if err == syscall.ERROR_MORE_DATA {
+ } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
@@ -173,7 +195,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return n, err
}
-func (s pipeAddress) Network() string {
+func (pipeAddress) Network() string {
return "pipe"
}
@@ -182,18 +204,24 @@ func (s pipeAddress) String() string {
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
for {
-
select {
case <-ctx.Done():
- return syscall.Handle(0), ctx.Err()
+ return windows.Handle(0), ctx.Err()
default:
- h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ h, err := fs.CreateFile(*path,
+ access,
+ 0, // mode
+ nil, // security attributes
+ fs.OPEN_EXISTING,
+ fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
+ 0, // template file handle
+ )
if err == nil {
return h, nil
}
- if err != cERROR_PIPE_BUSY {
+ if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
@@ -213,9 +241,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
} else {
absTimeout = time.Now().Add(2 * time.Second)
}
- ctx, _ := context.WithDeadline(context.Background(), absTimeout)
+ ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
+ defer cancel()
conn, err := DialPipeContext(ctx, path)
- if err == context.DeadlineExceeded {
+ if errors.Is(err, context.DeadlineExceeded) {
return nil, ErrTimeout
}
return conn, err
@@ -224,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
- return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
+ return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
}
+// PipeImpLevel is an enumeration of impersonation levels that may be set
+// when calling DialPipeAccessImpLevel.
+type PipeImpLevel uint32
+
+const (
+ PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS)
+ PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
+ PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION)
+ PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION)
+)
+
// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
+ return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
+}
+
+// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
+// `access` at `impLevel` until `ctx` cancellation or timeout. The other
+// DialPipe* implementations use PipeImpLevelAnonymous.
+func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
var err error
- var h syscall.Handle
- h, err = tryDialPipe(ctx, &path, access)
+ var h windows.Handle
+ h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
if err != nil {
return nil, err
}
@@ -245,13 +292,13 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
f, err := makeWin32File(h)
if err != nil {
- syscall.Close(h)
+ windows.Close(h)
return nil, err
}
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
- if flags&cPIPE_TYPE_MESSAGE != 0 {
+ if flags&windows.PIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
@@ -265,7 +312,7 @@ type acceptResponse struct {
}
type win32PipeListener struct {
- firstHandle syscall.Handle
+ firstHandle windows.Handle
path string
config PipeConfig
acceptCh chan (chan acceptResponse)
@@ -273,8 +320,8 @@ type win32PipeListener struct {
doneCh chan int
}
-func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
- path16, err := syscall.UTF16FromString(path)
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
+ path16, err := windows.UTF16FromString(path)
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
@@ -283,59 +330,81 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
- if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
+ if err := rtlDosPathNameToNtPathName(&path16[0],
+ &ntPath,
+ 0,
+ 0,
+ ).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
- defer localFree(ntPath.Buffer)
+ defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
oa.ObjectName = &ntPath
+ oa.Attributes = windows.OBJ_CASE_INSENSITIVE
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
- len := uint32(len(sd))
- sdb := localAlloc(0, len)
- defer localFree(sdb)
+ // TODO: does `sdb` need to be allocated on the heap, or can Go allocate it?
+ l := uint32(len(sd))
+ sdb, err := windows.LocalAlloc(0, l)
+ if err != nil {
+ return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err)
+ }
+ defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
} else {
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
- return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
+ return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
}
- defer localFree(dacl)
+ defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck
sdb := &securityDescriptor{
Revision: 1,
- Control: cSE_DACL_PRESENT,
+ Control: windows.SE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
- typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
+ typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
- typ |= cFILE_PIPE_MESSAGE_TYPE
+ typ |= windows.FILE_PIPE_MESSAGE_TYPE
}
- disposition := uint32(cFILE_OPEN)
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
+ disposition := fs.FILE_OPEN
+ access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
if first {
- disposition = cFILE_CREATE
+ disposition = fs.FILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
- access = syscall.SYNCHRONIZE
+ access = fs.SYNCHRONIZE
}
timeout := int64(-50 * 10000) // 50ms
var (
- h syscall.Handle
+ h windows.Handle
iosb ioStatusBlock
)
- err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
+ err = ntCreateNamedPipeFile(&h,
+ access,
+ &oa,
+ &iosb,
+ fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
+ disposition,
+ 0,
+ typ,
+ 0,
+ 0,
+ 0xffffffff,
+ uint32(c.InputBufferSize),
+ uint32(c.OutputBufferSize),
+ &timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
@@ -351,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
}
f, err := makeWin32File(h)
if err != nil {
- syscall.Close(h)
+ windows.Close(h)
return nil, err
}
return f, nil
@@ -380,7 +449,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p.Close()
p = nil
err = <-ch
- if err == nil || err == ErrFileClosed {
+ if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
err = ErrPipeListenerClosed
}
}
@@ -402,15 +471,15 @@ func (l *win32PipeListener) listenerRoutine() {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
- if err != cERROR_NO_DATA {
+ if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
break
}
}
responseCh <- acceptResponse{p, err}
- closed = err == ErrPipeListenerClosed
+ closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
}
}
- syscall.Close(l.firstHandle)
+ windows.Close(l.firstHandle)
l.firstHandle = 0
// Notify Close() and Accept() callers that the handle has been closed.
close(l.doneCh)
@@ -469,15 +538,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
}
func connectPipe(p *win32File) error {
- c, err := p.prepareIo()
+ c, err := p.prepareIO()
if err != nil {
return err
}
defer p.wg.Done()
err = connectNamedPipe(p.handle, &c.o)
- _, err = p.asyncIo(c, nil, 0, err)
- if err != nil && err != cERROR_PIPE_CONNECTED {
+ _, err = p.asyncIO(c, nil, 0, err)
+ if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
return err
}
return nil
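The named-pipe surface this diff touches (ListenPipe, DialPipeContext, the deadline plumbing) composes like an ordinary net listener/conn pair. A hedged sketch with a hypothetical pipe name; callers needing a non-anonymous impersonation level would use the new DialPipeAccessImpLevel instead:

```go
//go:build windows

package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	const path = `\\.\pipe\winio-demo` // hypothetical pipe name

	l, err := winio.ListenPipe(path, nil) // nil PipeConfig: byte-mode defaults
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		_, _ = c.Write([]byte("hello"))
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	conn, err := winio.DialPipeContext(ctx, path) // anonymous impersonation level
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 16)
	n, _ := conn.Read(buf)
	fmt.Println(string(buf[:n]))
}
```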
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
index f497c0e39..48ce4e924 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -1,5 +1,3 @@
-// +build windows
-
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
@@ -9,26 +7,26 @@ package guid
import (
"crypto/rand"
- "crypto/sha1"
+ "crypto/sha1" //nolint:gosec // not used for secure application
"encoding"
"encoding/binary"
"fmt"
"strconv"
-
- "golang.org/x/sys/windows"
)
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
+
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
-// The variants specified by RFC 4122.
+// The variants specified by RFC 4122 section 4.1.1.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
- VariantRFC4122
+ VariantRFC4122 // RFC 4122
VariantMicrosoft
VariantFuture
)
@@ -38,16 +36,13 @@ const (
// hash of an input string.
type Version uint8
+func (v Version) String() string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type so that stringification and
-// marshaling can be supported. The representation matches that used by native
-// Windows code.
-type GUID windows.GUID
-
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
@@ -70,7 +65,7 @@ func NewV4() (GUID, error) {
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
- b := sha1.New()
+ b := sha1.New() //nolint:gosec // not used for secure application
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 000000000..805bd3548
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type because golang.org/x/sys/windows is
+// only available to builds targeting `windows`. The representation matches that
+// used by native Windows code.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 000000000..27e45ee5c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
new file mode 100644
index 000000000..4076d3132
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
+
+package guid
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VariantUnknown-0]
+ _ = x[VariantNCS-1]
+ _ = x[VariantRFC4122-2]
+ _ = x[VariantMicrosoft-3]
+ _ = x[VariantFuture-4]
+}
+
+const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
+
+var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
+
+func (i Variant) String() string {
+ if i >= Variant(len(_Variant_index)-1) {
+ return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
+}
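Since this revision splits the GUID definition into per-platform files, pkg/guid now builds off Windows as well. A small cross-platform sketch (the namespace GUID below is the well-known RFC 4122 DNS namespace):

```go
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	g, err := guid.NewV4() // random (version 4) GUID
	if err != nil {
		panic(err)
	}
	// Variant prints "RFC 4122" via the generated stringer above.
	fmt.Println(g, g.Version(), g.Variant())

	// Name-based (version 5) GUID: the same namespace and name always
	// produce the same GUID.
	ns, _ := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	g5, _ := guid.NewV5(ns, []byte("example.com"))
	fmt.Println(g5)
}
```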
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
deleted file mode 100644
index fca241590..000000000
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// +build windows
-
-package security
-
-import (
- "os"
- "syscall"
- "unsafe"
-
- "github.com/pkg/errors"
-)
-
-type (
- accessMask uint32
- accessMode uint32
- desiredAccess uint32
- inheritMode uint32
- objectType uint32
- shareMode uint32
- securityInformation uint32
- trusteeForm uint32
- trusteeType uint32
-
- explicitAccess struct {
- accessPermissions accessMask
- accessMode accessMode
- inheritance inheritMode
- trustee trustee
- }
-
- trustee struct {
- multipleTrustee *trustee
- multipleTrusteeOperation int32
- trusteeForm trusteeForm
- trusteeType trusteeType
- name uintptr
- }
-)
-
-const (
- accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ
-
- accessModeGrant accessMode = 1
-
- desiredAccessReadControl desiredAccess = 0x20000
- desiredAccessWriteDac desiredAccess = 0x40000
-
- gvmga = "GrantVmGroupAccess:"
-
- inheritModeNoInheritance inheritMode = 0x0
- inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
-
- objectTypeFileObject objectType = 0x1
-
- securityInformationDACL securityInformation = 0x4
-
- shareModeRead shareMode = 0x1
- shareModeWrite shareMode = 0x2
-
- sidVmGroup = "S-1-5-83-0"
-
- trusteeFormIsSid trusteeForm = 0
-
- trusteeTypeWellKnownGroup trusteeType = 5
-)
-
-// GrantVMGroupAccess sets the DACL for a specified file or directory to
-// include Grant ACE entries for the VM Group SID. This is a golang re-
-// implementation of the same function in vmcompute, just not exported in
-// RS5. Which kind of sucks. Sucks a lot :/
-func GrantVmGroupAccess(name string) error {
- // Stat (to determine if `name` is a directory).
- s, err := os.Stat(name)
- if err != nil {
- return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
- }
-
- // Get a handle to the file/directory. Must defer Close on success.
- fd, err := createFile(name, s.IsDir())
- if err != nil {
- return err // Already wrapped
- }
- defer syscall.CloseHandle(fd)
-
- // Get the current DACL and Security Descriptor. Must defer LocalFree on success.
- ot := objectTypeFileObject
- si := securityInformationDACL
- sd := uintptr(0)
- origDACL := uintptr(0)
- if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
- return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
- }
- defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
-
- // Generate a new DACL which is the current DACL with the required ACEs added.
- // Must defer LocalFree on success.
- newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL)
- if err != nil {
- return err // Already wrapped
- }
- defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
-
- // And finally use SetSecurityInfo to apply the updated DACL.
- if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
- return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
- }
-
- return nil
-}
-
-// createFile is a helper function to call [Nt]CreateFile to get a handle to
-// the file or directory.
-func createFile(name string, isDir bool) (syscall.Handle, error) {
- namep := syscall.StringToUTF16(name)
- da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
- sm := uint32(shareModeRead | shareModeWrite)
- fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
- if isDir {
- fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
- }
- fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
- if err != nil {
- return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
- }
- return fd, nil
-}
-
-// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added.
-// The caller is responsible for LocalFree of the returned DACL on success.
-func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
- // Generate pointers to the SIDs based on the string SIDs
- sid, err := syscall.StringToSid(sidVmGroup)
- if err != nil {
- return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
- }
-
- inheritance := inheritModeNoInheritance
- if isDir {
- inheritance = inheritModeSubContainersAndObjectsInherit
- }
-
- eaArray := []explicitAccess{
- explicitAccess{
- accessPermissions: accessMaskDesiredPermission,
- accessMode: accessModeGrant,
- inheritance: inheritance,
- trustee: trustee{
- trusteeForm: trusteeFormIsSid,
- trusteeType: trusteeTypeWellKnownGroup,
- name: uintptr(unsafe.Pointer(sid)),
- },
- },
- }
-
- modifiedDACL := uintptr(0)
- if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
- return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
- }
-
- return modifiedDACL, nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
deleted file mode 100644
index c40c2739b..000000000
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package security
-
-//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go
-
-//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
-//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
-//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
deleted file mode 100644
index 4a90cb3cc..000000000
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Code generated by 'go generate'; DO NOT EDIT.
-
-package security
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
- errnoERROR_IO_PENDING = 997
-)
-
-var (
- errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
- errERROR_EINVAL error = syscall.EINVAL
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
- switch e {
- case 0:
- return errERROR_EINVAL
- case errnoERROR_IO_PENDING:
- return errERROR_IO_PENDING
- }
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
- return e
-}
-
-var (
- modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
-
- procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
- procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
- procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
-)
-
-func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
- if r1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
- if r1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
- if r1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go
index c3dd7c217..d9b90b6e8 100644
--- a/vendor/github.com/Microsoft/go-winio/privilege.go
+++ b/vendor/github.com/Microsoft/go-winio/privilege.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -8,7 +9,6 @@ import (
"fmt"
"runtime"
"sync"
- "syscall"
"unicode/utf16"
"golang.org/x/sys/windows"
@@ -17,29 +17,24 @@ import (
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
-//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
-//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
+//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
- SE_PRIVILEGE_ENABLED = 2
+ //revive:disable-next-line:var-naming ALL_CAPS
+ SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
- ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+ //revive:disable-next-line:var-naming ALL_CAPS
+ ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
SeSecurityPrivilege = "SeSecurityPrivilege"
)
-const (
- securityAnonymous = iota
- securityIdentification
- securityImpersonation
- securityDelegation
-)
-
var (
privNames = make(map[string]uint64)
privNameMutex sync.Mutex
@@ -51,11 +46,9 @@ type PrivilegeError struct {
}
func (e *PrivilegeError) Error() string {
- s := ""
+ s := "Could not enable privilege "
if len(e.privileges) > 1 {
s = "Could not enable privileges "
- } else {
- s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
@@ -94,7 +87,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
}
func mapPrivileges(names []string) ([]uint64, error) {
- var privileges []uint64
+ privileges := make([]uint64, 0, len(names))
privNameMutex.Lock()
defer privNameMutex.Unlock()
for _, name := range names {
@@ -127,7 +120,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
return err
}
- p, _ := windows.GetCurrentProcess()
+ p := windows.CurrentProcess()
var token windows.Token
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
if err != nil {
@@ -140,10 +133,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
- binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
- binary.Write(&b, binary.LittleEndian, p)
- binary.Write(&b, binary.LittleEndian, action)
+ _ = binary.Write(&b, binary.LittleEndian, p)
+ _ = binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
@@ -151,7 +144,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e
if !success {
return err
}
- if err == ERROR_NOT_ALL_ASSIGNED {
+ if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
return &PrivilegeError{privileges}
}
return nil
@@ -177,13 +170,13 @@ func getPrivilegeName(luid uint64) string {
}
func newThreadToken() (windows.Token, error) {
- err := impersonateSelf(securityImpersonation)
+ err := impersonateSelf(windows.SecurityImpersonation)
if err != nil {
return 0, err
}
var token windows.Token
- err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+ err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
if err != nil {
rerr := revertToSelf()
if rerr != nil {
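RunWithPrivileges, whose internals this hunk modernizes, is the usual entry point: it pins the goroutine to its OS thread, impersonates, enables the named privileges on the thread token, runs the callback, and reverts. A minimal sketch:

```go
//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	err := winio.RunWithPrivileges([]string{winio.SeBackupPrivilege}, func() error {
		// Privileged work (e.g. backup-semantics file access) goes here.
		fmt.Println("SeBackupPrivilege enabled on this thread")
		return nil
	})
	if err != nil {
		// A *winio.PrivilegeError reports which privileges could not be enabled.
		fmt.Println("error:", err)
	}
}
```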
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
index fc1ee4d3a..67d1a104a 100644
--- a/vendor/github.com/Microsoft/go-winio/reparse.go
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -1,3 +1,6 @@
+//go:build windows
+// +build windows
+
package winio
import (
@@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
}
var b bytes.Buffer
- binary.Write(&b, binary.LittleEndian, &data)
+ _ = binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
- binary.Write(&b, binary.LittleEndian, flags)
+ _ = binary.Write(&b, binary.LittleEndian, flags)
}
- binary.Write(&b, binary.LittleEndian, ntTarget16)
- binary.Write(&b, binary.LittleEndian, target16)
+ _ = binary.Write(&b, binary.LittleEndian, ntTarget16)
+ _ = binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}
diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go
index db1b370a1..c3685e98e 100644
--- a/vendor/github.com/Microsoft/go-winio/sd.go
+++ b/vendor/github.com/Microsoft/go-winio/sd.go
@@ -1,22 +1,20 @@
+//go:build windows
// +build windows
package winio
import (
- "syscall"
+ "errors"
+ "fmt"
"unsafe"
+
+ "golang.org/x/sys/windows"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
-//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
-//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
-//sys localFree(mem uintptr) = LocalFree
-//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
-
-const (
- cERROR_NONE_MAPPED = syscall.Errno(1332)
-)
+//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
type AccountLookupError struct {
Name string
@@ -28,8 +26,10 @@ func (e *AccountLookupError) Error() string {
return "lookup account: empty account name specified"
}
var s string
- switch e.Err {
- case cERROR_NONE_MAPPED:
+ switch {
+ case errors.Is(e.Err, windows.ERROR_INVALID_SID):
+ s = "the security ID structure is invalid"
+ case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
s = "not found"
default:
s = e.Err.Error()
@@ -37,6 +37,8 @@ func (e *AccountLookupError) Error() string {
return "lookup account " + e.Name + ": " + s
}
+func (e *AccountLookupError) Unwrap() error { return e.Err }
+
type SddlConversionError struct {
Sddl string
Err error
@@ -46,15 +48,19 @@ func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
+func (e *SddlConversionError) Unwrap() error { return e.Err }
+
// LookupSidByName looks up the SID of an account by name
+//
+//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
- return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+ return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
- if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+ if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
@@ -68,31 +74,60 @@ func LookupSidByName(name string) (sid string, err error) {
if err != nil {
return "", &AccountLookupError{name, err}
}
- sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
- localFree(uintptr(unsafe.Pointer(strBuffer)))
+ sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+ _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
return sid, nil
}
+// LookupNameBySid looks up the name of an account by SID
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupNameBySid(sid string) (name string, err error) {
+ if sid == "" {
+ return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
+ }
+
+ sidBuffer, err := windows.UTF16PtrFromString(sid)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ var sidPtr *byte
+ if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+ defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck
+
+ var nameSize, refDomainSize, sidNameUse uint32
+ err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
+ return "", &AccountLookupError{sid, err}
+ }
+
+ nameBuffer := make([]uint16, nameSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ name = windows.UTF16ToString(nameBuffer)
+ return name, nil
+}
+
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
- var sdBuffer uintptr
- err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+ sd, err := windows.SecurityDescriptorFromString(sddl)
if err != nil {
- return nil, &SddlConversionError{sddl, err}
+ return nil, &SddlConversionError{Sddl: sddl, Err: err}
}
- defer localFree(sdBuffer)
- sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
- copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
- return sd, nil
+ b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+ return b, nil
}
func SecurityDescriptorToSddl(sd []byte) (string, error) {
- var sddl *uint16
- // The returned string length seems to including an aribtrary number of terminating NULs.
- // Don't use it.
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
- if err != nil {
- return "", err
+ if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
+ return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
}
- defer localFree(uintptr(unsafe.Pointer(sddl)))
- return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+ s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
+ return s.String(), nil
}
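The reworked sd.go keeps the same exported shape, so an SDDL round-trip plus the new SID-to-name lookup looks like the sketch below (the SDDL string and SID are arbitrary examples; S-1-5-18 is LocalSystem):

```go
//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Round-trip an SDDL string through a binary security descriptor.
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		panic(err)
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		panic(err)
	}
	fmt.Println(sddl)

	// Reverse lookup added in this revision: SID string -> account name.
	name, err := winio.LookupNameBySid("S-1-5-18")
	fmt.Println(name, err)
}
```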
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
index 5955c99fd..a6ca111b3 100644
--- a/vendor/github.com/Microsoft/go-winio/syscall.go
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winio
-//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
index b03b789e6..b54cad112 100644
--- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
+++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package vhd
@@ -7,17 +8,16 @@ import (
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
- "github.com/pkg/errors"
"golang.org/x/sys/windows"
)
-//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go
-//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
-//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
-//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
-//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk
-//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath
+//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
+//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
+//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
+//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
+//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
type (
CreateVirtualDiskFlag uint32
@@ -62,20 +62,35 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}
+// The exported `OpenVersion2` struct exposes `GetInfoOnly` and `ReadOnly` as `bool`s for ease of use. However,
+// the underlying Windows structure uses `BOOL`s, i.e. int32s, for these fields. `openVersion2` translates the
+// `OpenVersion2` fields to the correct Windows field types for the `Open*` functions.
+type openVersion2 struct {
+ getInfoOnly int32
+ readOnly int32
+ resiliencyGUID guid.GUID
+}
+
+type openVirtualDiskParameters struct {
+ version uint32
+ version2 openVersion2
+}
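The bool-to-BOOL translation described above is the standard idiom for crossing the syscall boundary; in isolation (a sketch, not the vendored helper):

```go
// Win32 BOOL is a 32-bit integer, so exported Go bools are lowered to
// int32 0/1 before being placed in the parameter struct.
func boolToBOOL(b bool) int32 {
	if b {
		return 1
	}
	return 0
}
```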
+
type AttachVersion2 struct {
RestrictedOffset uint64
RestrictedLength uint64
}
type AttachVirtualDiskParameters struct {
- Version uint32 // Must always be set to 2
+ Version uint32
Version2 AttachVersion2
}
const (
+ //revive:disable-next-line:var-naming ALL_CAPS
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
- // Access Mask for opening a VHD
+ // Access Mask for opening a VHD.
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
@@ -87,7 +102,7 @@ const (
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
- // Flags for creating a VHD
+ // Flags for creating a VHD.
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
@@ -95,12 +110,12 @@ const (
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
- CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
+ CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
- CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
+ CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
- // Flags for opening a VHD
+ // Flags for opening a VHD.
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
@@ -113,7 +128,7 @@ const (
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
- // Flags for attaching a VHD
+ // Flags for attaching a VHD.
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
@@ -126,12 +141,14 @@ const (
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
- // Flags for detaching a VHD
+ // Flags for detaching a VHD.
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
)
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
// default values.
+//
+//revive:disable-next-line:var-naming VHDX, not Vhdx
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
params := CreateVirtualDiskParameters{
Version: 2,
@@ -146,21 +163,20 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
return err
}
- if err := syscall.CloseHandle(handle); err != nil {
- return err
- }
- return nil
+ return syscall.CloseHandle(handle)
}
// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
if err := detachVirtualDisk(handle, 0, 0); err != nil {
- return errors.Wrap(err, "failed to detach virtual disk")
+ return fmt.Errorf("failed to detach virtual disk: %w", err)
}
return nil
}
// DetachVhd detaches a vhd found at `path`.
+//
+//revive:disable-next-line:var-naming VHD, not Vhd
func DetachVhd(path string) error {
handle, err := OpenVirtualDisk(
path,
@@ -170,12 +186,16 @@ func DetachVhd(path string) error {
if err != nil {
return err
}
- defer syscall.CloseHandle(handle)
+ defer syscall.CloseHandle(handle) //nolint:errcheck
return DetachVirtualDisk(handle)
}
// AttachVirtualDisk attaches a virtual hard disk for use.
-func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
+func AttachVirtualDisk(
+ handle syscall.Handle,
+ attachVirtualDiskFlag AttachVirtualDiskFlag,
+ parameters *AttachVirtualDiskParameters,
+) (err error) {
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
if err := attachVirtualDisk(
handle,
@@ -185,13 +205,15 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
parameters,
nil,
); err != nil {
- return errors.Wrap(err, "failed to attach virtual disk")
+ return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
+//
+//revive:disable-next-line:var-naming VHD, not Vhd
func AttachVhd(path string) (err error) {
handle, err := OpenVirtualDisk(
path,
@@ -202,20 +224,24 @@ func AttachVhd(path string) (err error) {
return err
}
- defer syscall.CloseHandle(handle)
+ defer syscall.CloseHandle(handle) //nolint:errcheck
params := AttachVirtualDiskParameters{Version: 2}
if err := AttachVirtualDisk(
handle,
AttachVirtualDiskFlagNone,
¶ms,
); err != nil {
- return errors.Wrap(err, "failed to attach virtual disk")
+ return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
-func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
+func OpenVirtualDisk(
+ vhdPath string,
+ virtualDiskAccessMask VirtualDiskAccessMask,
+ openVirtualDiskFlags VirtualDiskFlag,
+) (syscall.Handle, error) {
parameters := OpenVirtualDiskParameters{Version: 2}
handle, err := OpenVirtualDiskWithParameters(
vhdPath,
@@ -230,29 +256,55 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask
}
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
-func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
+func OpenVirtualDiskWithParameters(
+ vhdPath string,
+ virtualDiskAccessMask VirtualDiskAccessMask,
+ openVirtualDiskFlags VirtualDiskFlag,
+ parameters *OpenVirtualDiskParameters,
+) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
+ getInfoOnly int32
+ readOnly int32
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
+ if parameters.Version2.GetInfoOnly {
+ getInfoOnly = 1
+ }
+ if parameters.Version2.ReadOnly {
+ readOnly = 1
+ }
+ params := &openVirtualDiskParameters{
+ version: parameters.Version,
+ version2: openVersion2{
+ getInfoOnly,
+ readOnly,
+ parameters.Version2.ResiliencyGUID,
+ },
+ }
if err := openVirtualDisk(
&defaultType,
vhdPath,
uint32(virtualDiskAccessMask),
uint32(openVirtualDiskFlags),
- parameters,
+ params,
&handle,
); err != nil {
- return 0, errors.Wrap(err, "failed to open virtual disk")
+ return 0, fmt.Errorf("failed to open virtual disk: %w", err)
}
return handle, nil
}
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
-func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
+func CreateVirtualDisk(
+ path string,
+ virtualDiskAccessMask VirtualDiskAccessMask,
+ createVirtualDiskFlags CreateVirtualDiskFlag,
+ parameters *CreateVirtualDiskParameters,
+) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
@@ -272,7 +324,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
nil,
&handle,
); err != nil {
- return handle, errors.Wrap(err, "failed to create virtual disk")
+ return handle, fmt.Errorf("failed to create virtual disk: %w", err)
}
return handle, nil
}
@@ -290,12 +342,14 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
&diskPathSizeInBytes,
&diskPhysicalPathBuf[0],
); err != nil {
- return "", errors.Wrap(err, "failed to get disk physical path")
+ return "", fmt.Errorf("failed to get disk physical path: %w", err)
}
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
// CreateDiffVhd is a helper function to create a differencing virtual disk.
+//
+//revive:disable-next-line:var-naming VHD, not Vhd
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
// Setting `ParentPath` is how to signal to create a differencing disk.
createParams := &CreateVirtualDiskParameters{
@@ -314,10 +368,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
createParams,
)
if err != nil {
- return fmt.Errorf("failed to create differencing vhd: %s", err)
+ return fmt.Errorf("failed to create differencing vhd: %w", err)
}
if err := syscall.CloseHandle(vhdHandle); err != nil {
- return fmt.Errorf("failed to close differencing vhd handle: %s", err)
+ return fmt.Errorf("failed to close differencing vhd handle: %w", err)
}
return nil
}
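
A recurring change in this file is the move from `errors.Wrap` (github.com/pkg/errors) to the standard library's `fmt.Errorf` with the `%w` verb. A small, self-contained sketch of why that matters to callers; the failing operation and sentinel error here are stand-ins, not part of go-winio:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// attach stands in for a failing virtual-disk operation.
func attach() error {
	// %w wraps the cause so it stays inspectable through the added context.
	return fmt.Errorf("failed to attach virtual disk: %w", os.ErrPermission)
}

func main() {
	if err := attach(); errors.Is(err, os.ErrPermission) {
		fmt.Println("underlying cause still detectable:", err)
	}
}
```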
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
index 572f7b42f..95c040743 100644
--- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
@@ -1,4 +1,6 @@
-// Code generated by 'go generate'; DO NOT EDIT.
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package vhd
@@ -31,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
@@ -47,60 +46,60 @@ var (
procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
)
-func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
- if r1 != 0 {
- err = errnoErr(e1)
+func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
+func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(path)
- if err != nil {
+ _p0, win32err = syscall.UTF16PtrFromString(path)
+ if win32err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle)
}
-func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
- r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
- if r1 != 0 {
- err = errnoErr(e1)
+func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procCreateVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
- if r1 != 0 {
- err = errnoErr(e1)
+func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procDetachVirtualDisk.Addr(), uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
- if r1 != 0 {
- err = errnoErr(e1)
+func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procGetVirtualDiskPhysicalPath.Addr(), uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
+func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(path)
- if err != nil {
+ _p0, win32err = syscall.UTF16PtrFromString(path)
+ if win32err != nil {
return
}
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}
-func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
- r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
- if r1 != 0 {
- err = errnoErr(e1)
+func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
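
The regenerated stubs above switch from the fixed-arity `syscall.Syscall`/`Syscall6`/`Syscall9` helpers to the variadic `syscall.SyscallN` (available since Go 1.18), and the virtdisk wrappers now treat a non-zero return value as the Win32 error code itself rather than reading `GetLastError`. A condensed, Windows-only sketch of that calling pattern, mirroring the generated `detachVirtualDisk` stub:

```go
//go:build windows

package vhd

import (
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modvirtdisk           = windows.NewLazySystemDLL("virtdisk.dll")
	procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk")
)

// detach shows the SyscallN shape: no argument-count parameter, just the
// procedure address followed by the arguments. DetachVirtualDisk returns
// a Win32 error code directly in r0, so a non-zero result becomes the
// error; GetLastError is never consulted.
func detach(handle syscall.Handle, flags, providerFlags uint32) (win32err error) {
	r0, _, _ := syscall.SyscallN(procDetachVirtualDisk.Addr(),
		uintptr(handle), uintptr(flags), uintptr(providerFlags))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}
	return
}
```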
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
index 176ff75e3..89b66eda8 100644
--- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated by 'go generate'; DO NOT EDIT.
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package winio
@@ -31,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
@@ -43,38 +42,34 @@ var (
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
- procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
- procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
- procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
- procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
- procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
- procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
- procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
- procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
- procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
- procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
- procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
- procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
- procBackupRead = modkernel32.NewProc("BackupRead")
- procBackupWrite = modkernel32.NewProc("BackupWrite")
- procCancelIoEx = modkernel32.NewProc("CancelIoEx")
- procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
- procCreateFileW = modkernel32.NewProc("CreateFileW")
- procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
- procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
- procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
- procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
- procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
- procLocalAlloc = modkernel32.NewProc("LocalAlloc")
- procLocalFree = modkernel32.NewProc("LocalFree")
- procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
- procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
- procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
- procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
- procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
- procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
- procbind = modws2_32.NewProc("bind")
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
+ procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
+ procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+ procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procBackupRead = modkernel32.NewProc("BackupRead")
+ procBackupWrite = modkernel32.NewProc("BackupWrite")
+ procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+ procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+ procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+ procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+ procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+ procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+ procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+ procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+ procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
+ procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+ procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+ procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
@@ -82,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
if releaseAll {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+ r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
err = errnoErr(e1)
@@ -90,47 +85,24 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
return
}
-func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
- if r1 == 0 {
- err = errnoErr(e1)
- }
- return
-}
-
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(str)
- if err != nil {
- return
- }
- return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
-}
-
-func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+func convertStringSidToSid(str *uint16, sid **byte) (err error) {
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func getSecurityDescriptorLength(sd uintptr) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
- len = uint32(r0)
- return
-}
-
func impersonateSelf(level uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -147,7 +119,15 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -164,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16,
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -181,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -203,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
}
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -223,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool,
}
func revertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
@@ -243,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
if processSecurity {
_p2 = 1
}
- r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
@@ -263,57 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
if processSecurity {
_p2 = 1
}
- r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
+func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
+func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(name)
- if err != nil {
- return
- }
- return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
-}
-
-func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
- handle = syscall.Handle(r0)
- if handle == syscall.InvalidHandle {
- err = errnoErr(e1)
- }
- return
-}
-
-func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
- newport = syscall.Handle(r0)
+func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
+ newport = windows.Handle(r0)
if newport == 0 {
err = errnoErr(e1)
}
return
}
-func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
@@ -322,106 +284,95 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
-func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
- handle = syscall.Handle(r0)
- if handle == syscall.InvalidHandle {
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
err = errnoErr(e1)
}
return
}
-func getCurrentThread() (h syscall.Handle) {
- r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
- h = syscall.Handle(r0)
- return
-}
-
-func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+func disconnectNamedPipe(pipe windows.Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
- if r1 == 0 {
- err = errnoErr(e1)
- }
+func getCurrentThread() (h windows.Handle) {
+ r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
+ h = windows.Handle(r0)
return
}
-func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
+func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
- r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
- ptr = uintptr(r0)
+func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func localFree(mem uintptr) {
- syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
+func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
+func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
- status = ntstatus(r0)
+func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
+ status = ntStatus(r0)
return
}
-func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
- status = ntstatus(r0)
+func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
+ status = ntStatus(r0)
return
}
-func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
- status = ntstatus(r0)
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
+ status = ntStatus(r0)
return
}
-func rtlNtStatusToDosError(status ntstatus) (winerr error) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
-func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
-
-func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
- if r1 == socketError {
- err = errnoErr(e1)
- }
- return
-}
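
Besides the `SyscallN` conversion, the winio stubs also migrate from `syscall.Handle`/`syscall.Overlapped` to their `golang.org/x/sys/windows` equivalents. The `errnoErr` helper they all share keeps returning a single cached error for `ERROR_IO_PENDING`, so the hot overlapped-I/O path avoids allocating a new error value on every call. A condensed sketch of that pattern as it appears in the generated files:

```go
//go:build windows

package winio

import "syscall"

const errnoERROR_IO_PENDING = 997 // ERROR_IO_PENDING

// Allocated once so callers on the overlapped-I/O hot path can compare
// against an identical error value instead of boxing a fresh Errno.
var errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)

// errnoErr maps a raw errno to a shared error value where one exists.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}
```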
diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes
index 94f480de9..dd0d09faa 100644
--- a/vendor/github.com/Microsoft/hcsshim/.gitattributes
+++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes
@@ -1 +1,3 @@
-* text=auto eol=lf
\ No newline at end of file
+* text=auto eol=lf
+vendor/** -text
+test/vendor/** -text
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore
index aec9bd4bb..74b68f0ad 100644
--- a/vendor/github.com/Microsoft/hcsshim/.gitignore
+++ b/vendor/github.com/Microsoft/hcsshim/.gitignore
@@ -1,3 +1,53 @@
+# Binaries for programs and plugins
*.exe
-.idea
-.vscode
+*.dll
+*.so
+*.dylib
+
+# Ignore vscode setting files
+.vscode/
+.idea/
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Ignore gcs bin directory
+service/bin/
+service/pkg/
+
+*.img
+*.vhd
+*.tar.gz
+*.tar
+
+# Make stuff
+.rootfs-done
+bin/*
+rootfs/*
+rootfs-conv/*
+*.o
+/build/
+
+deps/*
+out/*
+
+# protobuf files
+# only files at root of the repo, otherwise this will cause issues with vendoring
+/protobuf/*
+
+# test results
+test/results
+
+# go workspace files
+go.work
+go.work.sum
+
+# keys and related artifacts
+*.pem
+*.cose
diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
new file mode 100644
index 000000000..7d38a2fb9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
@@ -0,0 +1,174 @@
+run:
+ timeout: 8m
+ tests: true
+ build-tags:
+ - admin
+ - functional
+ - integration
+ skip-dirs:
+ # paths are relative to module root
+ - cri-containerd/test-images
+
+linters:
+ enable:
+ # defaults:
+ # - errcheck
+ # - gosimple
+ # - govet
+ # - ineffassign
+ # - staticcheck
+ # - typecheck
+ # - unused
+
+ - errorlint # error wrapping (eg, not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`)
+ - gofmt # whether code was gofmt-ed
+ - govet # enabled by default, but just to be sure
+ - nolintlint # ill-formed or insufficient nolint directives
+ - stylecheck # golint replacement
+ - thelper # test helpers without t.Helper()
+
+linters-settings:
+ govet:
+ enable-all: true
+ disable:
+ # struct order is often for Win32 compat
+ # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+ - fieldalignment
+ check-shadowing: true
+
+ stylecheck:
+ # https://staticcheck.io/docs/checks
+ checks: ["all"]
+
+issues:
+ exclude-rules:
+ # err is very often shadowed in nested scopes
+ - linters:
+ - govet
+ text: '^shadow: declaration of "err" shadows declaration'
+
+ # path is relative to module root, which is ./test/
+ - path: cri-containerd
+ linters:
+ - stylecheck
+ text: "^ST1003: should not use underscores in package names$"
+ source: "^package cri_containerd$"
+
+ # don't bother with proper error wrapping in test code
+ - path: cri-containerd
+ linters:
+ - errorlint
+ text: "non-wrapping format verb for fmt.Errorf"
+
+ # This repo has a LOT of generated schema files, operating system bindings, and other
+ # things that ST1003 from stylecheck won't like (screaming-case Windows API constants, for example).
+ # There are also some structs whose initialisms we *could* change to be Go friendly
+ # (Id -> ID), but they're exported, so that would be a breaking change.
+ # This keeps idiom checks in place for most new code, i.e. anything that isn't generated
+ # or meant to be a faithful mapping to an OS call or constant,
+ # while ignoring the things that are just noise or more of a hassle to change than they're worth.
+ - path: layer.go
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: hcsshim.go
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: cmd\\ncproxy\\nodenetsvc\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: cmd\\ncproxy_mock\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcs\\schema2\\
+ linters:
+ - stylecheck
+ - gofmt
+
+ - path: internal\\wclayer\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: hcn\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcs\\schema1\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hns\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: ext4\\internal\\compactext4\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: ext4\\internal\\format\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\guestrequest\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\guest\\prot\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\windevice\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\winapi\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\vmcompute\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\regstate\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcserror\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ # v0 APIs are deprecated, but still retained for backwards compatibility
+ - path: cmd\\ncproxy\\
+ linters:
+ - staticcheck
+ text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
+
+ - path: internal\\tools\\networkagent
+ linters:
+ - staticcheck
+ text: "^SA1019: .*nodenetsvc[/]?v0"
+
+ - path: internal\\vhdx\\info
+ linters:
+ - stylecheck
+ Text: "ST1003:"
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile
new file mode 100644
index 000000000..de6435894
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile
@@ -0,0 +1,188 @@
+BASE:=base.tar.gz
+DEV_BUILD:=0
+
+GO:=go
+GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
+CGO_ENABLED:=0
+GOMODVENDOR:=
+
+CFLAGS:=-O2 -Wall
+LDFLAGS:=-static -s # strip C binaries
+
+GO_FLAGS_EXTRA:=
+ifeq "$(GOMODVENDOR)" "1"
+GO_FLAGS_EXTRA += -mod=vendor
+endif
+GO_BUILD_TAGS:=
+ifneq ($(strip $(GO_BUILD_TAGS)),)
+GO_FLAGS_EXTRA += -tags="$(GO_BUILD_TAGS)"
+endif
+GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+# additional directories to search for rule prerequisites and targets
+VPATH=$(SRCROOT)
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
+
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
+# The link aliases for gcstools
+GCS_TOOLS=\
+ generichook \
+ install-drivers
+
+# Common path prefix.
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipes, e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+
+.PHONY: all always rootfs test snp simple
+
+.DEFAULT_GOAL := all
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+ find -name '*.o' -print0 | xargs -0 -r rm
+ rm -rf bin deps rootfs out
+
+test:
+ cd $(SRCROOT) && $(GO) test -v ./internal/guest/...
+
+rootfs: out/rootfs.vhd
+
+snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+ rm -f $@
+ # du -BM returns the size of the bin file in M, e.g. 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+ $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+ $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+ rm -f $@
+ python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
+
+ROOTFS_DEVICE:=/dev/sda
+VERITY_DEVICE:=/dev/sdb
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+ rm -f $@
+ python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
+ rm -f $@
+ python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
+
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % bin/cmd/tar2ext4
+ ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 bin/cmd/tar2ext4
+ ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+ veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+ # Retrieve info required by dm-verity at boot time
+ # Get the blocksize of rootfs
+ cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+ cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+ cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+ cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+ cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+ echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+
+out/rootfs.hash.salt:
+ hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+
+out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
+ gzip -f -d ./out/rootfs.tar.gz
+ ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@
+
+out/rootfs.tar.gz: out/initrd.img
+ rm -rf rootfs-conv
+ mkdir rootfs-conv
+ gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
+ tar -zcf $@ -C rootfs-conv .
+ rm -rf rootfs-conv
+
+out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
+ $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
+ gzip -c out/initrd.img.uncompressed > $@
+ rm out/initrd.img.uncompressed
+
+# This target includes utilities which may be useful for testing purposes.
+out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
+ rm -rf rootfs-dev
+ mkdir rootfs-dev
+ tar -xzf out/delta.tar.gz -C rootfs-dev
+ cp bin/internal/tools/snp-report rootfs-dev/bin/
+ tar -zcf $@ -C rootfs-dev .
+ rm -rf rootfs-dev
+
+out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh
+ rm -rf rootfs-snp
+ mkdir rootfs-snp
+ tar -xzf out/delta.tar.gz -C rootfs-snp
+ cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh
+ cp boot/startup_simple.sh rootfs-snp/startup_simple.sh
+ cp boot/startup.sh rootfs-snp/startup.sh
+ cp bin/internal/tools/snp-report rootfs-snp/bin/
+ chmod a+x rootfs-snp/startup_v2056.sh
+ chmod a+x rootfs-snp/startup_simple.sh
+ chmod a+x rootfs-snp/startup.sh
+ tar -zcf $@ -C rootfs-snp .
+ rm -rf rootfs-snp
+
+out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
+ @mkdir -p out
+ rm -rf rootfs
+ mkdir -p rootfs/bin/
+ mkdir -p rootfs/info/
+ cp bin/init rootfs/
+ cp bin/vsockexec rootfs/bin/
+ cp bin/cmd/gcs rootfs/bin/
+ cp bin/cmd/gcstools rootfs/bin/
+ cp bin/cmd/hooks/wait-paths rootfs/bin/
+ for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
+ git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \
+ git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \
+ date --iso-8601=minute --utc > rootfs/info/tar.date
+ $(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \
+ jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \
+ jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date)
+ tar -zcf $@ -C rootfs .
+ rm -rf rootfs
+
+out/containerd-shim-runhcs-v1.exe:
+ GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
+
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
+ @mkdir -p $(dir $@)
+ GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
+
+bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
+ @mkdir -p bin
+ $(CC) $(LDFLAGS) -o $@ $^
+
+bin/init: init/init.o vsockexec/vsock.o
+ @mkdir -p bin
+ $(CC) $(LDFLAGS) -o $@ $^
+
+%.o: %.c
+ @mkdir -p $(dir $@)
+ $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
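
The dm-verity recipes above use awk and shell arithmetic to derive the values passed on the kernel command line; in particular, `datasectors` is the data-block count times the data block size, divided into 512-byte sectors. The same computation in Go, purely as a worked illustration of that recipe line:

```go
package main

import "fmt"

// dataSectors reproduces the Makefile arithmetic:
// datasectors = datablocks * datablocksize / 512.
func dataSectors(dataBlocks, dataBlockSize uint64) uint64 {
	return dataBlocks * dataBlockSize / 512
}

func main() {
	// Example: 25600 blocks of 4096 bytes (100 MiB) -> 204800 sectors.
	fmt.Println(dataSectors(25600, 4096))
}
```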
diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml
index ee18671aa..17145bb25 100644
--- a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml
+++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml
@@ -1,49 +1,25 @@
-version = "unstable"
-generator = "gogoctrd"
-plugins = ["grpc", "fieldpath"]
+version = "2"
+generators = ["go", "go-grpc"]
-# Control protoc include paths. Below are usually some good defaults, but feel
-# free to try it without them if it works for your project.
+# Control protoc include paths.
[includes]
- # Include paths that will be added before all others. Typically, you want to
- # treat the root of the project as an include, but this may not be necessary.
before = ["./protobuf"]
- # Paths that should be treated as include roots in relation to the vendor
- # directory. These will be calculated with the vendor directory nearest the
- # target package.
- packages = ["github.com/gogo/protobuf"]
+ # defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows.
+ # override defaults to suppress errors about non-existent directories.
+ after = []
- # Paths that will be added untouched to the end of the includes. We use
- # `/usr/local/include` to pickup the common install location of protobuf.
- # This is the default.
- after = ["/usr/local/include"]
-
-# This section maps protobuf imports to Go packages. These will become
-# `-M` directives in the call to the go protobuf generator.
+# This section maps protobuf imports to Go packages.
[packages]
- "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
- "google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
- "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
- "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
- "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
- "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
- "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
- "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
- "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1"
-
-[[overrides]]
-prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
-plugins = ["ttrpc"]
-
-[[overrides]]
-prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
-plugins = ["ttrpc"]
-
-[[overrides]]
-prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
-plugins = ["ttrpc"]
+ # github.com/containerd/cgroups protofiles still list their go path as "github.com/containerd/cgroups/cgroup1/stats"
+ "github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto" = "github.com/containerd/cgroups/v3/cgroup1/stats"
[[overrides]]
-prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
-plugins = ["ttrpc"]
\ No newline at end of file
+prefixes = [
+ "github.com/Microsoft/hcsshim/internal/shimdiag",
+ "github.com/Microsoft/hcsshim/internal/extendedtask",
+ "github.com/Microsoft/hcsshim/internal/computeagent",
+ "github.com/Microsoft/hcsshim/internal/ncproxyttrpc",
+ "github.com/Microsoft/hcsshim/internal/vmservice",
+]
+generators = ["go", "go-ttrpc"]
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
index 95c300365..320438048 100644
--- a/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -2,22 +2,139 @@
[](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
-This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
-It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
+It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
+
+## Building
+
+While this repository can be used as a library of sorts to call the HCS APIs, a couple of binaries are built out of it as well: the main ones are the Linux guest agent and an implementation of the [runtime v2 containerd shim API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+
+### Linux Hyper-V Container Guest Agent
+
+To build the Linux guest agent itself, all that's needed is to set `GOOS` to `linux` and build out of `./cmd/gcs`.
+
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine
+
+```sh
+> go build ./cmd/gcs
+```
+
+If you want the agent packaged inside a rootfs to boot alongside all of the other tools, you'll need to provide a rootfs for it to be packaged into. An easy way is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+
+For background on the shim interface, see the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md) documentation.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the same directory as the containerd binary in your environment.
+A default Containerd configuration file can be generated by running:
+
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for CRI interactions.
+
+To try out the shim with ctr.exe:
+
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
## Contributing
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.microsoft.com.
+the rights to use your contribution. For details, visit [Microsoft CLA](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
-We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off.
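+
+For example:
+
+```sh
+# sign off a single commit as it is created
+> git commit --signoff -m "commit message"
+
+# retroactively sign off a range of commits (here, the last three)
+> git rebase --signoff HEAD~3
+```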
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+Since `./test` is a separate Go module, the linter is run from both the root and the
+`test` directories. Additionally, the linter is run with `GOOS` set to both `windows` and
+`linux`.
+
+The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml). The linter can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+ "go.lintTool": "golangci-lint",
+ "go.lintOnSave": "package",
+```
+
+Additional editor [integration options are also available][lint-ide].
+Alternatively, `golangci-lint` can be [installed][lint-install] and run locally:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run
+```
+
+To run across the entire repo for both `GOOS=windows` and `linux`:
+
+```powershell
+> foreach ( $goos in ('windows', 'linux') ) {
+ foreach ( $repo in ('.', 'test') ) {
+ pwsh -Command "cd $repo && go env -w GOOS=$goos && golangci-lint.exe run --verbose"
+ }
+}
+```
+
+### Go Generate
+
+The pipeline checks that code auto-generated via `go generate` is up to date.
+Similar to the [linting stage](#linting), `go generate` is run in both the root and test Go modules.
+
+This can be done via:
+
+```shell
+> go generate ./...
+> cd test && go generate ./...
+```
## Code of Conduct
@@ -27,7 +144,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio
## Dependencies
-This project requires Golang 1.9 or newer to build.
+This project requires Golang 1.18 or newer to build.
For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
@@ -44,3 +161,10 @@ For additional details, see [Report a Computer Security Vulnerability](https://t
---------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
diff --git a/vendor/github.com/Microsoft/hcsshim/SECURITY.md b/vendor/github.com/Microsoft/hcsshim/SECURITY.md
new file mode 100644
index 000000000..869fdfe2b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
index 7f1f2823d..301a10888 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -17,8 +19,8 @@ import (
//
// `layerData` is the parent read-only layer data.
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
- title := "hcsshim.AttachLayerStorageFilter"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::AttachLayerStorageFilter"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -36,3 +38,31 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L
}
return nil
}
+
+// AttachOverlayFilter sets up a filter of the given type on a writable container layer. Currently the only
+// supported filter types are WCIFS & UnionFS (defined in internal/hcs/schema2/layer.go)
+//
+// `volumePath` is the volume path at which the writable layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func AttachOverlayFilter(ctx context.Context, volumePath string, layerData LayerData) (err error) {
+ title := "hcsshim::AttachOverlayFilter"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("volumePath", volumePath),
+ )
+
+ bytes, err := json.Marshal(layerData)
+ if err != nil {
+ return err
+ }
+
+ err = hcsAttachOverlayFilter(volumePath, string(bytes))
+ if err != nil {
+ return errors.Wrap(err, "failed to attach overlay filter")
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
index 8e28e6c50..5058d3b55 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -12,8 +14,8 @@ import (
//
// `layerPath` is a path to a directory containing the layer to export.
func DestroyLayer(ctx context.Context, layerPath string) (err error) {
- title := "hcsshim.DestroyLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::DestroyLayer"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
index 435473257..6e00e4a1f 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -1,8 +1,12 @@
+//go:build windows
+
package computestorage
import (
"context"
+ "encoding/json"
+ hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
@@ -12,8 +16,8 @@ import (
//
// `layerPath` is a path to a directory containing the layer to export.
func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
- title := "hcsshim.DetachLayerStorageFilter"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::DetachLayerStorageFilter"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
@@ -24,3 +28,27 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error)
}
return nil
}
+
+// DetachOverlayFilter detaches the filter on a writable container layer.
+//
+// `volumePath` is a path to the writable container volume.
+func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcsschema.FileSystemFilterType) (err error) {
+ title := "hcsshim::DetachOverlayFilter"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("volumePath", volumePath))
+
+ layerData := LayerData{}
+ layerData.FilterType = filterType
+ bytes, err := json.Marshal(layerData)
+ if err != nil {
+ return err
+ }
+
+ err = hcsDetachOverlayFilter(volumePath, string(bytes))
+ if err != nil {
+ return errors.Wrap(err, "failed to detach overlay filter")
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
index a1b12dd12..c6370a5c9 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -19,8 +21,8 @@ import (
//
// `options` are the export options applied to the exported layer.
func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
- title := "hcsshim.ExportLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::ExportLayer"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -28,17 +30,17 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD
trace.StringAttribute("exportFolderPath", exportFolderPath),
)
- ldbytes, err := json.Marshal(layerData)
+ ldBytes, err := json.Marshal(layerData)
if err != nil {
return err
}
- obytes, err := json.Marshal(options)
+ oBytes, err := json.Marshal(options)
if err != nil {
return err
}
- err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
+ err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes))
if err != nil {
return errors.Wrap(err, "failed to export layer")
}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
index 83c0fa33f..2140e5c9f 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -5,16 +7,20 @@ import (
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
- "go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
//
// If the VHD is not mounted it will be temporarily mounted.
+//
+// NOTE: This API had a breaking change in the operating system after Windows Server 2019.
+// On ws2019 the API expects to be passed a file handle from CreateFile for the vhd that
+// the caller wants to format. On > ws2019, it's expected that the caller passes a vhd handle
+// that can be obtained from the virtdisk APIs.
func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
- title := "hcsshim.FormatWritableLayerVhd"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::FormatWritableLayerVhd"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
index 87fee452c..858c84601 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -6,13 +8,17 @@ import (
"path/filepath"
"syscall"
- "github.com/Microsoft/go-winio/pkg/security"
"github.com/Microsoft/go-winio/vhd"
+ "github.com/Microsoft/hcsshim/internal/memory"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
+
+ "github.com/Microsoft/hcsshim/internal/security"
)
-const defaultVHDXBlockSizeInMB = 1
+const (
+ defaultVHDXBlockSizeInMB = 1
+)
// SetupContainerBaseLayer is a helper to setup a containers scratch. It
// will create and format the vhdx's inside and the size is configurable with the sizeInGB
@@ -59,8 +65,8 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
createParams := &vhd.CreateVirtualDiskParameters{
Version: 2,
Version2: vhd.CreateVersion2{
- MaximumSize: sizeInGB * 1024 * 1024 * 1024,
- BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
+ MaximumSize: sizeInGB * memory.GiB,
+ BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
},
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
@@ -135,8 +141,8 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
createParams := &vhd.CreateVirtualDiskParameters{
Version: 2,
Version2: vhd.CreateVersion2{
- MaximumSize: sizeInGB * 1024 * 1024 * 1024,
- BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
+ MaximumSize: sizeInGB * memory.GiB,
+ BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
},
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
index 0c61dab32..e1c87416a 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -19,8 +21,8 @@ import (
//
// `layerData` is the parent layer data.
func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) {
- title := "hcsshim.ImportLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::ImportLayer"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
index 53ed8ea6e..d0c621605 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -16,8 +18,8 @@ import (
//
// `layerData` is the parent read-only layer data.
func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) {
- title := "hcsshim.InitializeWritableLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::InitializeWritableLayer"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
index fcdbbef81..4f4d8ebf2 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -6,14 +8,13 @@ import (
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
- "go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
- title := "hcsshim.GetLayerVhdMountPath"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::GetLayerVhdMountPath"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
index 06aaf841e..1c685aed0 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package computestorage
import (
@@ -21,8 +23,8 @@ import (
//
// `options` are the options applied while processing the layer.
func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) {
- title := "hcsshim.SetupBaseOSLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::SetupBaseOSLayer"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -48,12 +50,16 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H
// `volumePath` is the path to the volume to be used for setup.
//
// `options` are the options applied while processing the layer.
+//
+// NOTE: This API is only available on builds of Windows greater than 19645. Internally we
+// check if the host's build has the API available by using 'GetVersion', which requires
+// the calling application to be manifested. https://docs.microsoft.com/en-us/windows/win32/sbscs/manifests
func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) {
if osversion.Build() < 19645 {
return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
}
- title := "hcsshim.SetupBaseOSVolume"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ title := "hcsshim::SetupBaseOSVolume"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
index 95aff9c18..5af931f2f 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
@@ -7,11 +7,11 @@ import (
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)
-//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go storage.go
//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer?
//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer?
-//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer?
+//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestroyLayer?
//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer?
//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer?
//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter?
@@ -19,11 +19,17 @@ import (
//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd?
//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath?
//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume?
+//sys hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsAttachOverlayFilter?
+//sys hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsDetachOverlayFilter?
+
+type Version = hcsschema.Version
+type Layer = hcsschema.Layer
// LayerData is the data used to describe parent layer information.
type LayerData struct {
- SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"`
- Layers []hcsschema.Layer `json:"Layers,omitempty"`
+ SchemaVersion Version `json:"SchemaVersion,omitempty"`
+ Layers []Layer `json:"Layers,omitempty"`
+ FilterType hcsschema.FileSystemFilterType `json:"FilterType,omitempty"`
}
// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall.
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
index 4f9518067..9f697beca 100644
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package computestorage
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,55 +29,50 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
var (
modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")
- procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer")
- procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer")
- procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer")
- procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer")
- procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer")
procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter")
+ procHcsAttachOverlayFilter = modcomputestorage.NewProc("HcsAttachOverlayFilter")
+ procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer")
procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter")
+ procHcsDetachOverlayFilter = modcomputestorage.NewProc("HcsDetachOverlayFilter")
+ procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer")
procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath")
+ procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer")
+ procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer")
+ procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer")
procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume")
)
-func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) {
+func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath)
- if hr != nil {
- return
- }
- var _p2 *uint16
- _p2, hr = syscall.UTF16PtrFromString(layerData)
+ _p1, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
- return _hcsImportLayer(_p0, _p1, _p2)
+ return _hcsAttachLayerStorageFilter(_p0, _p1)
}
-func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) {
- if hr = procHcsImportLayer.Find(); hr != nil {
+func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) {
+ hr = procHcsAttachLayerStorageFilter.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
+ r0, _, _ := syscall.SyscallN(procHcsAttachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -84,35 +82,50 @@ func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uin
return
}
-func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) {
+func hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(layerPath)
+ _p0, hr = syscall.UTF16PtrFromString(volumePath)
if hr != nil {
return
}
var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(exportFolderPath)
+ _p1, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
- var _p2 *uint16
- _p2, hr = syscall.UTF16PtrFromString(layerData)
+ return _hcsAttachOverlayFilter(_p0, _p1)
+}
+
+func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
+ hr = procHcsAttachOverlayFilter.Find()
if hr != nil {
return
}
- var _p3 *uint16
- _p3, hr = syscall.UTF16PtrFromString(options)
+ r0, _, _ := syscall.SyscallN(procHcsAttachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func hcsDestroyLayer(layerPath string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
- return _hcsExportLayer(_p0, _p1, _p2, _p3)
+ return _hcsDestroyLayer(_p0)
}
-func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) {
- if hr = procHcsExportLayer.Find(); hr != nil {
+func _hcsDestroyLayer(layerPath *uint16) (hr error) {
+ hr = procHcsDestroyLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsDestroyLayer.Addr(), uintptr(unsafe.Pointer(layerPath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -122,20 +135,21 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin
return
}
-func hcsDestroyLayer(layerPath string) (hr error) {
+func hcsDetachLayerStorageFilter(layerPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
- return _hcsDestroyLayer(_p0)
+ return _hcsDetachLayerStorageFilter(_p0)
}
-func _hcsDestroyLayer(layerPath *uint16) (hr error) {
- if hr = procHcsDestoryLayer.Find(); hr != nil {
+func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
+ hr = procHcsDetachLayerStorageFilter.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsDetachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -145,25 +159,26 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) {
return
}
-func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) {
+func hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(layerPath)
+ _p0, hr = syscall.UTF16PtrFromString(volumePath)
if hr != nil {
return
}
var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(options)
+ _p1, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
- return _hcsSetupBaseOSLayer(_p0, handle, _p1)
+ return _hcsDetachOverlayFilter(_p0, _p1)
}
-func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) {
- if hr = procHcsSetupBaseOSLayer.Find(); hr != nil {
+func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
+ hr = procHcsDetachOverlayFilter.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
+ r0, _, _ := syscall.SyscallN(procHcsDetachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -173,30 +188,36 @@ func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uin
return
}
-func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) {
+func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(writableLayerPath)
+ _p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(layerData)
+ _p1, hr = syscall.UTF16PtrFromString(exportFolderPath)
if hr != nil {
return
}
var _p2 *uint16
- _p2, hr = syscall.UTF16PtrFromString(options)
+ _p2, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
- return _hcsInitializeWritableLayer(_p0, _p1, _p2)
+ var _p3 *uint16
+ _p3, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsExportLayer(_p0, _p1, _p2, _p3)
}
-func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) {
- if hr = procHcsInitializeWritableLayer.Find(); hr != nil {
+func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) {
+ hr = procHcsExportLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
+ r0, _, _ := syscall.SyscallN(procHcsExportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -206,25 +227,27 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o
return
}
-func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(layerPath)
+func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
+ hr = procHcsFormatWritableLayerVhd.Find()
if hr != nil {
return
}
- var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(layerData)
- if hr != nil {
- return
+ r0, _, _ := syscall.SyscallN(procHcsFormatWritableLayerVhd.Addr(), uintptr(handle))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
- return _hcsAttachLayerStorageFilter(_p0, _p1)
+ return
}
-func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) {
- if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil {
+func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) {
+ hr = procHcsGetLayerVhdMountPath.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0)
+ r0, _, _ := syscall.SyscallN(procHcsGetLayerVhdMountPath.Addr(), uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -234,20 +257,31 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro
return
}
-func hcsDetachLayerStorageFilter(layerPath string) (hr error) {
+func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
- return _hcsDetachLayerStorageFilter(_p0)
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath)
+ if hr != nil {
+ return
+ }
+ var _p2 *uint16
+ _p2, hr = syscall.UTF16PtrFromString(layerData)
+ if hr != nil {
+ return
+ }
+ return _hcsImportLayer(_p0, _p1, _p2)
}
-func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
- if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil {
+func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) {
+ hr = procHcsImportLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsImportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -257,11 +291,31 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
return
}
-func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
- if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil {
+func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(writableLayerPath)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(layerData)
+ if hr != nil {
+ return
+ }
+ var _p2 *uint16
+ _p2, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
+ return _hcsInitializeWritableLayer(_p0, _p1, _p2)
+}
+
+func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) {
+ hr = procHcsInitializeWritableLayer.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procHcsInitializeWritableLayer.Addr(), uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -271,11 +325,26 @@ func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
return
}
-func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) {
- if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil {
+func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(layerPath)
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0)
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsSetupBaseOSLayer(_p0, handle, _p1)
+}
+
+func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) {
+ hr = procHcsSetupBaseOSLayer.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -305,10 +374,11 @@ func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (
}
func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) {
- if hr = procHcsSetupBaseOSVolume.Find(); hr != nil {
+ hr = procHcsSetupBaseOSVolume.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
+ r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSVolume.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go
index bfd722898..0ad7f495a 100644
--- a/vendor/github.com/Microsoft/hcsshim/container.go
+++ b/vendor/github.com/Microsoft/hcsshim/container.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
@@ -60,7 +62,7 @@ type container struct {
waitCh chan struct{}
}
-// createComputeSystemAdditionalJSON is read from the environment at initialisation
+// createContainerAdditionalJSON is read from the environment at initialization
// time. It allows an environment variable to define additional JSON which
// is merged in the CreateComputeSystem call to HCS.
var createContainerAdditionalJSON []byte
@@ -73,7 +75,7 @@ func init() {
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
if err != nil {
- return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
+ return nil, fmt.Errorf("failed to merge additional JSON '%s': %w", createContainerAdditionalJSON, err)
}
system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
index 794308673..b441b0cd3 100644
--- a/vendor/github.com/Microsoft/hcsshim/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
@@ -50,6 +52,9 @@ var (
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = hcs.ErrUnexpectedValue
+ // ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
+ ErrOperationDenied = hcs.ErrOperationDenied
+
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
@@ -59,7 +64,7 @@ var (
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
- // ErrProcNotFound is an error encountered when the the process cannot be found
+// ErrProcNotFound is an error encountered when a procedure lookup fails.
ErrProcNotFound = hcs.ErrProcNotFound
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
@@ -110,6 +115,7 @@ func (e *ContainerError) Error() string {
s += " encountered an error during " + e.Operation
}
+ //nolint:errorlint // legacy code
switch e.Err.(type) {
case nil:
break
@@ -140,6 +146,7 @@ func (e *ProcessError) Error() string {
s += " encountered an error during " + e.Operation
}
+ //nolint:errorlint // legacy code
switch e.Err.(type) {
case nil:
break
@@ -159,12 +166,12 @@ func (e *ProcessError) Error() string {
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
-// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+// will currently return true when the error is ErrElementNotFound.
func IsNotExist(err error) bool {
- if _, ok := err.(EndpointNotFoundError); ok {
+ if _, ok := err.(EndpointNotFoundError); ok { //nolint:errorlint // legacy code
return true
}
- if _, ok := err.(NetworkNotFoundError); ok {
+ if _, ok := err.(NetworkNotFoundError); ok { //nolint:errorlint // legacy code
return true
}
return hcs.IsNotExist(getInnerError(err))
@@ -192,7 +199,7 @@ func IsTimeout(err error) bool {
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
-// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+// will currently return true when the error is ErrElementNotFound.
func IsAlreadyStopped(err error) bool {
return hcs.IsAlreadyStopped(getInnerError(err))
}
@@ -219,6 +226,7 @@ func IsAccessIsDenied(err error) bool {
}
func getInnerError(err error) error {
+ //nolint:errorlint // legacy code
switch pe := err.(type) {
case nil:
return nil
@@ -231,14 +239,14 @@ func getInnerError(err error) error {
}
func convertSystemError(err error, c *container) error {
- if serr, ok := err.(*hcs.SystemError); ok {
+ if serr, ok := err.(*hcs.SystemError); ok { //nolint:errorlint // legacy code
return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events}
}
return err
}
func convertProcessError(err error, p *process) error {
- if perr, ok := err.(*hcs.ProcessError); ok {
+ if perr, ok := err.(*hcs.ProcessError); ok { //nolint:errorlint // legacy code
return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
}
return err
diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
deleted file mode 100644
index ce6edbcf3..000000000
--- a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
+++ /dev/null
@@ -1,12 +0,0 @@
-# Requirements so far:
-# dockerd running
-# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
-# - image alpine (linux) docker pull --platform=linux alpine
-
-
-# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true"
-#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
-
-#pushd uvm
-go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
-#popd
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 21640a6b9..8c32c66f9 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -1,27 +1,123 @@
module github.com/Microsoft/hcsshim
-go 1.13
+go 1.22
require (
- github.com/Microsoft/go-winio v0.4.17
- github.com/containerd/cgroups v1.0.1
- github.com/containerd/console v1.0.2
- github.com/containerd/containerd v1.5.1
+ github.com/Microsoft/cosesign1go v1.2.0
+ github.com/Microsoft/didx509go v0.0.3
+ github.com/Microsoft/go-winio v0.6.2
+ github.com/blang/semver/v4 v4.0.0
+ github.com/cenkalti/backoff/v4 v4.3.0
+ github.com/containerd/cgroups/v3 v3.0.3
+ github.com/containerd/console v1.0.4
+ github.com/containerd/containerd v1.7.23
+ github.com/containerd/containerd/api v1.7.19
+ github.com/containerd/errdefs v0.3.0
+ github.com/containerd/errdefs/pkg v0.3.0
github.com/containerd/go-runc v1.0.0
- github.com/containerd/ttrpc v1.0.2
- github.com/containerd/typeurl v1.0.2
- github.com/gogo/protobuf v1.3.2
- github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
+ github.com/containerd/protobuild v0.3.0
+ github.com/containerd/ttrpc v1.2.5
+ github.com/containerd/typeurl/v2 v2.2.0
+ github.com/google/go-cmp v0.6.0
+ github.com/google/go-containerregistry v0.20.1
+ github.com/josephspurrier/goversioninfo v1.4.0
+ github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3
+ github.com/mattn/go-shellwords v1.0.12
+ github.com/open-policy-agent/opa v0.68.0
+ github.com/opencontainers/runc v1.1.14
+ github.com/opencontainers/runtime-spec v1.2.0
+ github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.7.0
- github.com/urfave/cli v1.22.2
- go.opencensus.io v0.22.3
- golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20210324051608-47abb6519492
- google.golang.org/grpc v1.33.2
+ github.com/sirupsen/logrus v1.9.3
+ github.com/urfave/cli v1.22.15
+ github.com/vishvananda/netlink v1.3.0
+ github.com/vishvananda/netns v0.0.4
+ go.etcd.io/bbolt v1.3.10
+ go.opencensus.io v0.24.0
+ go.uber.org/mock v0.4.0
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+ golang.org/x/sync v0.8.0
+ golang.org/x/sys v0.25.0
+ google.golang.org/grpc v1.67.0
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
+ google.golang.org/protobuf v1.34.2
)
-replace (
- google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
- google.golang.org/grpc => google.golang.org/grpc v1.27.1
+require (
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/OneOfOne/xxhash v1.2.8 // indirect
+ github.com/agnivade/levenshtein v1.1.1 // indirect
+ github.com/akavel/rsrc v0.10.2 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/containerd/continuity v0.4.2 // indirect
+ github.com/containerd/fifo v1.1.0 // indirect
+ github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/cli v24.0.0+incompatible // indirect
+ github.com/docker/distribution v2.8.2+incompatible // indirect
+ github.com/docker/docker v27.1.1+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.7.0 // indirect
+ github.com/docker/go-connections v0.4.0 // indirect
+ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.4.0 // indirect
+ github.com/go-ini/ini v1.67.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/goccy/go-json v0.10.2 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/mux v1.8.1 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
+ github.com/lestrrat-go/blackmagic v1.0.2 // indirect
+ github.com/lestrrat-go/httpcc v1.0.1 // indirect
+ github.com/lestrrat-go/iter v1.0.2 // indirect
+ github.com/lestrrat-go/jwx v1.2.29 // indirect
+ github.com/lestrrat-go/option v1.0.1 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
+ github.com/moby/sys/mountinfo v0.6.2 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/sys/userns v0.1.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
+ github.com/prometheus/client_golang v1.20.2 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+ github.com/vbatts/tar-split v0.11.3 // indirect
+ github.com/veraison/go-cose v1.1.0 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+ github.com/yashtewari/glob-intersection v0.2.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
+ go.opentelemetry.io/otel v1.28.0 // indirect
+ go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/otel/trace v1.28.0 // indirect
+ golang.org/x/crypto v0.26.0 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
+ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+ google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum
index b7bede13f..f2d566bf8 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.sum
+++ b/vendor/github.com/Microsoft/hcsshim/go.sum
@@ -1,806 +1,445 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/Microsoft/cosesign1go v1.2.0 h1:Hpj/mI6kP1eBkaOv9XEBRwO+Ju24k8XIo/A+OQyjmlw=
+github.com/Microsoft/cosesign1go v1.2.0/go.mod h1:1La/HcGw19rRLhPW0S6u55K6LKfti+GQSgGCtrfhVe8=
+github.com/Microsoft/didx509go v0.0.3 h1:n/owuFOXVzCEzSyzivMEolKEouBm9G0NrEDgoTekM8A=
+github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7rLF7yLglYbk=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw=
+github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1 h1:xWHPAoe6VkUiI9GAvndJM7s/0MTrmwX3AQiYTr3olf0=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
+github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
+github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ=
+github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
+github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
+github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
+github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
+github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
+github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/protobuild v0.3.0 h1:RIyEIu+D+iIha6E1PREBPAXspSMFaDVam81JlolZWpg=
+github.com/containerd/protobuild v0.3.0/go.mod h1:5mNMFKKAwCIAkFBPiOdtRx2KiQlyEJeMXnL5R1DsWu8=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
+github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
+github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM=
+github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
+github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
+github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
+github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
+github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
+github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/josephspurrier/goversioninfo v1.4.0 h1:Puhl12NSHUSALHSuzYwPYQkqa2E1+7SrtAPJorKK0C8=
+github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k=
+github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
+github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI=
+github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
+github.com/lestrrat-go/jwx v1.2.29 h1:QT0utmUJ4/12rmsVQrJ3u55bycPkKqGYuGT4tyRhxSQ=
+github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
+github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
+github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ=
+github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w=
+github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM=
+github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dmRIc0=
+github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
+github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
+github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o=
+github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
+github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
+github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
+github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -808,78 +447,23 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
index ceb3ac85e..13f80e4a8 100644
--- a/vendor/github.com/Microsoft/hcsshim/hcsshim.go
+++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
@@ -1,15 +1,17 @@
+//go:build windows
+
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcsshim
import (
- "syscall"
+ "golang.org/x/sys/windows"
"github.com/Microsoft/hcsshim/internal/hcserror"
)
-//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hcsshim.go
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
@@ -17,9 +19,9 @@ const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
- ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE
- ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
- WSAEINVAL = syscall.Errno(10022)
+ ERROR_GEN_FAILURE = windows.ERROR_GEN_FAILURE
+ ERROR_SHUTDOWN_IN_PROGRESS = windows.ERROR_SHUTDOWN_IN_PROGRESS
+ WSAEINVAL = windows.WSAEINVAL
// Timeout on wait calls
TimeoutInfinite = 0xFFFFFFFF
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go
new file mode 100644
index 000000000..86c7c22aa
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go
@@ -0,0 +1,46 @@
+//go:build windows
+
+package hcsshim
+
+import (
+ "errors"
+
+ "github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// HNSNnvManagementMacAddress represents a management mac address
+// which needs to be excluded from VF reassignment
+type HNSNnvManagementMacAddress = hns.HNSNnvManagementMacAddress
+
+// HNSNnvManagementMacList represents a list of management
+// mac addresses for exclusion from VF reassignment
+type HNSNnvManagementMacList = hns.HNSNnvManagementMacList
+
+var (
+ ErrorEmptyMacAddressList = errors.New("management mac_address list is empty")
+)
+
+// SetNnvManagementMacAddresses sets a list of
+// management mac addresses in hns for exclusion from VF reassignment.
+func SetNnvManagementMacAddresses(managementMacAddresses []string) (*HNSNnvManagementMacList, error) {
+ if len(managementMacAddresses) == 0 {
+ return nil, ErrorEmptyMacAddressList
+ }
+ nnvManagementMacList := &HNSNnvManagementMacList{}
+ for _, mac := range managementMacAddresses {
+ nnvManagementMacList.MacAddressList = append(nnvManagementMacList.MacAddressList, HNSNnvManagementMacAddress{MacAddress: mac})
+ }
+ return nnvManagementMacList.Set()
+}
+
+// GetNnvManagementMacAddresses retrieves a list of
+// management mac addresses in hns for exclusion from VF reassignment.
+func GetNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) {
+ return hns.GetNnvManagementMacAddressList()
+}
+
+// DeleteNnvManagementMacAddresses deletes the list of
+// management mac addresses in hns which are excluded from VF reassignment.
+func DeleteNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) {
+ return hns.DeleteNnvManagementMacAddressList()
+}
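The accelnet helpers above are new with this hcsshim bump. As a rough sketch only (Windows-only build, placeholder MAC address, not part of the patch), a caller might use them like this:

// Hypothetical caller of the new accelnet helpers (Windows-only).
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Exclude a management NIC's MAC from VF reassignment; the address is a placeholder.
	list, err := hcsshim.SetNnvManagementMacAddresses([]string{"00-15-5D-00-00-01"})
	if err != nil {
		// Covers both ErrorEmptyMacAddressList and failures from the underlying hns Set call.
		fmt.Println("set failed:", err)
		return
	}
	fmt.Printf("excluded %d management mac(s)\n", len(list.MacAddressList))
}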
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
index 408312672..d8a73de98 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
@@ -7,10 +9,13 @@ import (
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint = hns.HNSEndpoint
+// HNSEndpointStats represents the stats for a network endpoint in HNS
+type HNSEndpointStats = hns.EndpointStats
+
// Namespace represents a Compartment.
type Namespace = hns.Namespace
-//SystemType represents the type of the system on which actions are done
+// SystemType represents the type of the system on which actions are done
type SystemType string
// SystemType const
@@ -108,3 +113,8 @@ func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
return hns.GetHNSEndpointByName(endpointName)
}
+
+// GetHNSEndpointStats gets the stats for the given HNS endpoint
+func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) {
+ return hns.GetHNSEndpointStats(endpointName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
index 2b5381904..c564bf4a3 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
index f775fa1d0..925c21249 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
@@ -1,14 +1,16 @@
+//go:build windows
+
package hcsshim
import (
"github.com/Microsoft/hcsshim/internal/hns"
)
-// Subnet is assoicated with a network and represents a list
+// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet = hns.Subnet
-// MacPool is assoicated with a network and represents a list
+// MacPool is associated with a network and represents a list
// of macaddresses available to the network
type MacPool = hns.MacPool
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
index 55aaa4a50..9bfe61ee8 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go
index 69405244b..d97681e0c 100644
--- a/vendor/github.com/Microsoft/hcsshim/hnssupport.go
+++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
index 300eb5996..81a281951 100644
--- a/vendor/github.com/Microsoft/hcsshim/interface.go
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
index 27a62a723..b60cd383b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package cow
import (
@@ -86,6 +88,12 @@ type Container interface {
// container to be terminated by some error condition (including calling
// Close).
Wait() error
+ // WaitChannel returns the wait channel of the container
+ WaitChannel() <-chan struct{}
+ // WaitError returns the container termination error.
+ // This function should only be called after the channel in WaitChannel()
+ // is closed. Otherwise it is not thread safe.
+ WaitError() error
// Modify sends a request to modify container resources
Modify(ctx context.Context, config interface{}) error
}
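The WaitChannel/WaitError pair added above splits the blocking Wait into a selectable channel plus an error read that is only valid once that channel is closed. A minimal sketch of the intended call pattern, using a stand-in type rather than a real cow.Container:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waiter mirrors the new WaitChannel/WaitError methods.
type waiter interface {
	WaitChannel() <-chan struct{}
	WaitError() error
}

// fakeContainer stands in for a real container in this sketch.
type fakeContainer struct {
	done chan struct{}
	err  error
}

func (f *fakeContainer) WaitChannel() <-chan struct{} { return f.done }
func (f *fakeContainer) WaitError() error             { return f.err }

// waitWithTimeout shows what the split enables: select on the channel,
// and read the error only after the channel has been closed.
func waitWithTimeout(w waiter, d time.Duration) error {
	select {
	case <-w.WaitChannel():
		return w.WaitError()
	case <-time.After(d):
		return errors.New("timed out waiting for container")
	}
}

func main() {
	c := &fakeContainer{done: make(chan struct{})}
	go func() { close(c.done) }() // simulate the container exiting
	fmt.Println(waitWithTimeout(c, time.Second)) // <nil>
}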
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
index d13772b03..7b27173c3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcs
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go
new file mode 100644
index 000000000..d792dda98
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go
@@ -0,0 +1 @@
+package hcs
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
index 7696e4b48..3e10f5c7e 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcs
import (
@@ -51,6 +53,9 @@ var (
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
+	// ErrOperationDenied is an error encountered when hcs attempts an operation that is explicitly denied
+ ErrOperationDenied = errors.New("operation denied")
+
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
@@ -60,7 +65,7 @@ var (
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
- // ErrProcNotFound is an error encountered when the the process cannot be found
+	// ErrProcNotFound is an error encountered when a procedure lookup fails.
ErrProcNotFound = syscall.Errno(0x7f)
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
@@ -78,6 +83,13 @@ var (
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+ // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+ ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+	// ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system after the handle to that
+	// compute system has already been closed.
+ ErrInvalidHandle = syscall.Errno(0x6)
)
type ErrorEvent struct {
@@ -145,33 +157,38 @@ func (e *HcsError) Error() string {
return s
}
+func (e *HcsError) Is(target error) bool {
+ return errors.Is(e.Err, target)
+}
+
+// Unwrap isn't strictly needed, but it is a helpful convenience function.
+
+func (e *HcsError) Unwrap() error {
+ return e.Err
+}
+
+// Deprecated: net.Error.Temporary is deprecated.
func (e *HcsError) Temporary() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Temporary()
+ err := e.netError()
+ return (err != nil) && err.Temporary()
}
func (e *HcsError) Timeout() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Timeout()
+ err := e.netError()
+ return (err != nil) && err.Timeout()
}
-// ProcessError is an error encountered in HCS during an operation on a Process object
-type ProcessError struct {
- SystemID string
- Pid int
- Op string
- Err error
- Events []ErrorEvent
+func (e *HcsError) netError() (err net.Error) {
+ if errors.As(e.Unwrap(), &err) {
+ return err
+ }
+ return nil
}
-var _ net.Error = &ProcessError{}
-
// SystemError is an error encountered in HCS during an operation on a Container object
type SystemError struct {
- ID string
- Op string
- Err error
- Events []ErrorEvent
+ HcsError
+ ID string
}
var _ net.Error = &SystemError{}
@@ -184,29 +201,32 @@ func (e *SystemError) Error() string {
return s
}
-func (e *SystemError) Temporary() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Temporary()
-}
-
-func (e *SystemError) Timeout() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Timeout()
-}
-
func makeSystemError(system *System, op string, err error, events []ErrorEvent) error {
// Don't double wrap errors
- if _, ok := err.(*SystemError); ok {
+ var e *SystemError
+ if errors.As(err, &e) {
return err
}
+
return &SystemError{
- ID: system.ID(),
- Op: op,
- Err: err,
- Events: events,
+ ID: system.ID(),
+ HcsError: HcsError{
+ Op: op,
+ Err: err,
+ Events: events,
+ },
}
}
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+ HcsError
+ SystemID string
+ Pid int
+}
+
+var _ net.Error = &ProcessError{}
+
func (e *ProcessError) Error() string {
s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error())
for _, ev := range e.Events {
@@ -215,75 +235,72 @@ func (e *ProcessError) Error() string {
return s
}
-func (e *ProcessError) Temporary() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Temporary()
-}
-
-func (e *ProcessError) Timeout() bool {
- err, ok := e.Err.(net.Error)
- return ok && err.Timeout()
-}
-
func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
// Don't double wrap errors
- if _, ok := err.(*ProcessError); ok {
+ var e *ProcessError
+ if errors.As(err, &e) {
return err
}
return &ProcessError{
Pid: process.Pid(),
SystemID: process.SystemID(),
- Op: op,
- Err: err,
- Events: events,
+ HcsError: HcsError{
+ Op: op,
+ Err: err,
+ Events: events,
+ },
}
}
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
-// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+// will currently return true when the error is ErrElementNotFound.
func IsNotExist(err error) bool {
- err = getInnerError(err)
- return err == ErrComputeSystemDoesNotExist ||
- err == ErrElementNotFound ||
- err == ErrProcNotFound
+ return IsAny(err, ErrComputeSystemDoesNotExist, ErrElementNotFound)
+}
+
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error has been observed when querying
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+ return errors.Is(err, ErrInvalidHandle)
}
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
- err = getInnerError(err)
- return err == ErrAlreadyClosed
+ return errors.Is(err, ErrAlreadyClosed)
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
- err = getInnerError(err)
- return err == ErrVmcomputeOperationPending
+ return errors.Is(err, ErrVmcomputeOperationPending)
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
- if err, ok := err.(net.Error); ok && err.Timeout() {
+	// HcsError and co. implement Timeout regardless of whether the errors they wrap do,
+	// so `errors.As(err, net.Error)` will always be true.
+	// Using `errors.As(err.Unwrap(), net.Error)` won't work for general errors.
+	// So first check if there is an `ErrTimeout` in the chain, then convert to a net error.
+ if errors.Is(err, ErrTimeout) {
return true
}
- err = getInnerError(err)
- return err == ErrTimeout
+
+ var nerr net.Error
+ return errors.As(err, &nerr) && nerr.Timeout()
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
-// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+// will currently return true when the error is ErrElementNotFound.
func IsAlreadyStopped(err error) bool {
- err = getInnerError(err)
- return err == ErrVmcomputeAlreadyStopped ||
- err == ErrElementNotFound ||
- err == ErrProcNotFound
+ return IsAny(err, ErrVmcomputeAlreadyStopped, ErrProcessAlreadyStopped, ErrElementNotFound)
}
// IsNotSupported returns a boolean indicating whether the error is caused by
@@ -292,38 +309,28 @@ func IsAlreadyStopped(err error) bool {
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
// is thrown from the Platform
func IsNotSupported(err error) bool {
- err = getInnerError(err)
// If Platform doesn't recognize or support the request sent, below errors are seen
- return err == ErrVmcomputeInvalidJSON ||
- err == ErrInvalidData ||
- err == ErrNotSupported ||
- err == ErrVmcomputeUnknownMessage
+ return IsAny(err, ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported, ErrVmcomputeUnknownMessage)
}
// IsOperationInvalidState returns true when err is caused by
// `ErrVmcomputeOperationInvalidState`.
func IsOperationInvalidState(err error) bool {
- err = getInnerError(err)
- return err == ErrVmcomputeOperationInvalidState
+ return errors.Is(err, ErrVmcomputeOperationInvalidState)
}
// IsAccessIsDenied returns true when err is caused by
// `ErrVmcomputeOperationAccessIsDenied`.
func IsAccessIsDenied(err error) bool {
- err = getInnerError(err)
- return err == ErrVmcomputeOperationAccessIsDenied
+ return errors.Is(err, ErrVmcomputeOperationAccessIsDenied)
}
-func getInnerError(err error) error {
- switch pe := err.(type) {
- case nil:
- return nil
- case *HcsError:
- err = pe.Err
- case *SystemError:
- err = pe.Err
- case *ProcessError:
- err = pe.Err
+// IsAny is a vectorized version of [errors.Is]; it returns true if err matches any of the targets.
+func IsAny(err error, targets ...error) bool {
+ for _, e := range targets {
+ if errors.Is(err, e) {
+ return true
+ }
}
- return err
+ return false
}
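Since HcsError now implements Is and Unwrap, all of these predicates reduce to standard errors-package checks, and IsAny is simply errors.Is folded over several sentinels. A standalone sketch of the same pattern against ordinary sentinel errors (the names below are illustrative, not hcsshim's):

package main

import (
	"errors"
	"fmt"
)

var (
	errStopped  = errors.New("already stopped")
	errNotFound = errors.New("element not found")
)

// isAny mirrors the IsAny helper above: errors.Is against each target.
func isAny(err error, targets ...error) bool {
	for _, t := range targets {
		if errors.Is(err, t) {
			return true
		}
	}
	return false
}

func main() {
	wrapped := fmt.Errorf("kill container: %w", errStopped)
	fmt.Println(isAny(wrapped, errStopped, errNotFound)) // true: %w preserves the chain
	fmt.Println(isAny(errors.New("other"), errStopped))  // false
}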
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 8f2034668..8ef611d6a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -1,31 +1,39 @@
+//go:build windows
+
package hcs
import (
"context"
"encoding/json"
+ "errors"
"io"
+ "os"
"sync"
"syscall"
"time"
+ "go.opencensus.io/trace"
+
+ "github.com/Microsoft/hcsshim/internal/cow"
+ hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
+ "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
"github.com/Microsoft/hcsshim/internal/vmcompute"
- "go.opencensus.io/trace"
)
-// ContainerError is an error encountered in HCS
type Process struct {
- handleLock sync.RWMutex
- handle vmcompute.HcsProcess
- processID int
- system *System
- hasCachedStdio bool
- stdioLock sync.Mutex
- stdin io.WriteCloser
- stdout io.ReadCloser
- stderr io.ReadCloser
- callbackNumber uintptr
+ handleLock sync.RWMutex
+ handle vmcompute.HcsProcess
+ processID int
+ system *System
+ hasCachedStdio bool
+ stdioLock sync.Mutex
+ stdin io.WriteCloser
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ callbackNumber uintptr
+ killSignalDelivered bool
closedWaitOnce sync.Once
waitBlock chan struct{}
@@ -33,6 +41,8 @@ type Process struct {
waitError error
}
+var _ cow.Process = &Process{}
+
func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
return &Process{
handle: process,
@@ -42,35 +52,6 @@ func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *Syst
}
}
-type processModifyRequest struct {
- Operation string
- ConsoleSize *consoleSize `json:",omitempty"`
- CloseHandle *closeHandle `json:",omitempty"`
-}
-
-type consoleSize struct {
- Height uint16
- Width uint16
-}
-
-type closeHandle struct {
- Handle string
-}
-
-type processStatus struct {
- ProcessID uint32
- Exited bool
- ExitCode uint32
- LastWaitResult int32
-}
-
-const stdIn string = "StdIn"
-
-const (
- modifyConsoleSize string = "ConsoleSize"
- modifyCloseHandle string = "CloseHandle"
-)
-
// Pid returns the process ID of the process within the container.
func (process *Process) Pid() int {
return process.processID
@@ -82,14 +63,11 @@ func (process *Process) SystemID() string {
}
func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
- switch err {
+ switch err { //nolint:errorlint
case nil:
return true, nil
case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
- select {
- case <-process.waitBlock:
- // The process exit notification has already arrived.
- default:
+ if !process.stopped() {
// The process should be gone, but we have not received the notification.
// After a second, force unblock the process wait to work around a possible
// deadlock in the HCS.
@@ -111,9 +89,9 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo
// Signal signals the process with `options`.
//
-// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
+// For LCOW `guestresource.SignalProcessOptionsLCOW`.
//
-// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
+// For WCOW `guestresource.SignalProcessOptionsWCOW`.
func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
@@ -149,12 +127,81 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
- resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+ if process.stopped() {
+ return false, makeProcessError(process, operation, ErrProcessAlreadyStopped, nil)
+ }
+
+ if process.killSignalDelivered {
+		// A kill signal has already been sent to this process. Sending a second
+		// one offers no real benefit, as processes cannot stop themselves from
+		// being terminated once a TerminateProcess has been issued. Sending a
+		// second kill may result in a number of errors (two of which are detailed
+		// below) that we can avoid handling.
+ return true, nil
+ }
+
+ // HCS serializes the signals sent to a target pid per compute system handle.
+ // To avoid SIGKILL being serialized behind other signals, we open a new compute
+ // system handle to deliver the kill signal.
+	// If the call to open a new compute system handle fails, we forcefully
+	// terminate the container itself so that no container is left behind.
+ hcsSystem, err := OpenComputeSystem(ctx, process.system.id)
+ if err != nil {
+ // log error and force termination of container
+ log.G(ctx).WithField("err", err).Error("OpenComputeSystem() call failed")
+ err = process.system.Terminate(ctx)
+		// if the Terminate() call itself fails, log and return the error
+ if err != nil {
+ log.G(ctx).WithField("err", err).Error("Terminate() call failed")
+ return false, err
+ }
+ process.system.Close()
+ return true, nil
+ }
+ defer hcsSystem.Close()
+
+ newProcessHandle, err := hcsSystem.OpenProcess(ctx, process.Pid())
+ if err != nil {
+ // Return true only if the target process has either already
+ // exited, or does not exist.
+ if IsAlreadyStopped(err) {
+ return true, nil
+ } else {
+ return false, err
+ }
+ }
+ defer newProcessHandle.Close()
+
+ resultJSON, err := vmcompute.HcsTerminateProcess(ctx, newProcessHandle.handle)
+ if err != nil {
+		// We still need to check these two cases, as processes may still be killed by an
+		// external actor (human operator, OOM, a random script, etc.).
+ if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+			// There are two cases where it should be safe to ignore an error returned
+			// by HcsTerminateProcess. The first one is caused by the fact that
+			// HcsTerminateProcess ends up calling TerminateProcess in the context
+			// of a container. According to the TerminateProcess documentation:
+			// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+			// after a process has terminated, a call to TerminateProcess with open
+			// handles to the process fails with the ERROR_ACCESS_DENIED (5) error code.
+			// It's safe to ignore this error here. HCS should always have permission
+			// to kill processes inside any container, so an ERROR_ACCESS_DENIED
+			// is unlikely to be anything other than what the closing remarks in the
+			// documentation describe.
+ //
+ // The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+ // is called twice in a very short amount of time. In such cases, hcs may return
+ // HCS_E_PROCESS_ALREADY_STOPPED.
+ return true, nil
+ }
+ }
events := processHcsResult(ctx, resultJSON)
- delivered, err := process.processSignalResult(ctx, err)
+ delivered, err := newProcessHandle.processSignalResult(ctx, err)
if err != nil {
- err = makeProcessError(process, operation, err, events)
+ err = makeProcessError(newProcessHandle, operation, err, events)
}
+
+ process.killSignalDelivered = delivered
return delivered, err
}
@@ -165,7 +212,7 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
// call multiple times.
func (process *Process) waitBackground() {
operation := "hcs::Process::waitBackground"
- ctx, span := trace.StartSpan(context.Background(), operation)
+ ctx, span := oc.StartSpan(context.Background(), operation)
defer span.End()
span.AddAttributes(
trace.StringAttribute("cid", process.SystemID()),
@@ -186,17 +233,17 @@ func (process *Process) waitBackground() {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
- // Make sure we didnt race with Close() here
+ // Make sure we didn't race with Close() here
if process.handle != 0 {
propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
if err != nil {
- err = makeProcessError(process, operation, err, events) //nolint:ineffassign
+ err = makeProcessError(process, operation, err, events)
} else {
- properties := &processStatus{}
+ properties := &hcsschema.ProcessStatus{}
err = json.Unmarshal([]byte(propertiesJSON), properties)
if err != nil {
- err = makeProcessError(process, operation, err, nil) //nolint:ineffassign
+ err = makeProcessError(process, operation, err, nil)
} else {
if properties.LastWaitResult != 0 {
log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
@@ -218,12 +265,22 @@ func (process *Process) waitBackground() {
}
// Wait waits for the process to exit. If the process has already exited returns
-// the pervious error (if any).
+// the previous error (if any).
func (process *Process) Wait() error {
<-process.waitBlock
return process.waitError
}
+// stopped returns true if the process has exited.
+func (process *Process) stopped() bool {
+ select {
+ case <-process.waitBlock:
+ return true
+ default:
+ return false
+ }
+}
+
// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
process.handleLock.RLock()
@@ -234,10 +291,9 @@ func (process *Process) ResizeConsole(ctx context.Context, width, height uint16)
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
-
- modifyRequest := processModifyRequest{
- Operation: modifyConsoleSize,
- ConsoleSize: &consoleSize{
+ modifyRequest := hcsschema.ProcessModifyRequest{
+ Operation: guestrequest.ModifyProcessConsoleSize,
+ ConsoleSize: &hcsschema.ConsoleSize{
Height: height,
Width: width,
},
@@ -260,15 +316,13 @@ func (process *Process) ResizeConsole(ctx context.Context, width, height uint16)
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (int, error) {
- select {
- case <-process.waitBlock:
- if process.waitError != nil {
- return -1, process.waitError
- }
- return process.exitCode, nil
- default:
+ if !process.stopped() {
return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil)
}
+ if process.waitError != nil {
+ return -1, process.waitError
+ }
+ return process.exitCode, nil
}
// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
@@ -276,7 +330,7 @@ func (process *Process) ExitCode() (int, error) {
// are the responsibility of the caller to close.
func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
operation := "hcs::Process::StdioLegacy"
- ctx, span := trace.StartSpan(context.Background(), operation)
+ ctx, span := oc.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -314,7 +368,7 @@ func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.R
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively.
-// To close them, close the process handle.
+// To close them, close the process handle, or use the `CloseStd*` functions.
func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
process.stdioLock.Lock()
defer process.stdioLock.Unlock()
@@ -323,46 +377,55 @@ func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
-func (process *Process) CloseStdin(ctx context.Context) error {
+func (process *Process) CloseStdin(ctx context.Context) (err error) {
+ operation := "hcs::Process::CloseStdin"
+ ctx, span := trace.StartSpan(ctx, operation)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("cid", process.SystemID()),
+ trace.Int64Attribute("pid", int64(process.processID)))
+
process.handleLock.RLock()
defer process.handleLock.RUnlock()
- operation := "hcs::Process::CloseStdin"
-
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
- modifyRequest := processModifyRequest{
- Operation: modifyCloseHandle,
- CloseHandle: &closeHandle{
- Handle: stdIn,
- },
- }
+	// An HcsModifyProcess request to close stdin will fail if the process has already exited.
+ if !process.stopped() {
+ modifyRequest := hcsschema.ProcessModifyRequest{
+ Operation: guestrequest.CloseProcessHandle,
+ CloseHandle: &hcsschema.CloseHandle{
+ Handle: guestrequest.STDInHandle,
+ },
+ }
- modifyRequestb, err := json.Marshal(modifyRequest)
- if err != nil {
- return err
- }
+ modifyRequestb, err := json.Marshal(modifyRequest)
+ if err != nil {
+ return err
+ }
- resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
- events := processHcsResult(ctx, resultJSON)
- if err != nil {
- return makeProcessError(process, operation, err, events)
+ resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
+ events := processHcsResult(ctx, resultJSON)
+ if err != nil {
+ return makeProcessError(process, operation, err, events)
+ }
}
process.stdioLock.Lock()
+ defer process.stdioLock.Unlock()
if process.stdin != nil {
process.stdin.Close()
process.stdin = nil
}
- process.stdioLock.Unlock()
return nil
}
func (process *Process) CloseStdout(ctx context.Context) (err error) {
- ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -386,7 +449,7 @@ func (process *Process) CloseStdout(ctx context.Context) (err error) {
}
func (process *Process) CloseStderr(ctx context.Context) (err error) {
- ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -405,7 +468,6 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) {
if process.stderr != nil {
process.stderr.Close()
process.stderr = nil
-
}
return nil
}
@@ -414,7 +476,7 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) {
// or wait on it.
func (process *Process) Close() (err error) {
operation := "hcs::Process::Close"
- ctx, span := trace.StartSpan(context.Background(), operation)
+ ctx, span := oc.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
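The stopped() helper introduced in this file is the standard non-blocking probe of a channel that is closed exactly once on exit. A self-contained sketch of the idiom:

package main

import "fmt"

// proc stands in for Process; waitBlock is closed when the process exits.
type proc struct {
	waitBlock chan struct{}
}

// stopped reports whether waitBlock has been closed, without blocking.
func (p *proc) stopped() bool {
	select {
	case <-p.waitBlock:
		return true
	default:
		return false
	}
}

func main() {
	p := &proc{waitBlock: make(chan struct{})}
	fmt.Println(p.stopped()) // false: channel still open
	close(p.waitBlock)       // simulate the exit notification
	fmt.Println(p.stopped()) // true
}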
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
index b621c5593..d1f219cfa 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package schema1
import (
@@ -101,7 +103,7 @@ type ContainerConfig struct {
HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
Servicing bool `json:",omitempty"` // True if this container is for servicing
AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
- DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution
+ DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution
ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
index bcfeb34d5..70884aad7 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
@@ -27,4 +27,10 @@ type Attachment struct {
CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
+
+ SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"`
+
+ AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"`
+
+ ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
new file mode 100644
index 000000000..81865e7ea
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
@@ -0,0 +1,25 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.5
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+const (
+ CimMountFlagNone uint32 = 0x0
+ CimMountFlagChildOnly uint32 = 0x1
+ CimMountFlagEnableDax uint32 = 0x2
+ CimMountFlagCacheFiles uint32 = 0x4
+ CimMountFlagCacheRegions uint32 = 0x8
+)
+
+type CimMount struct {
+ ImagePath string `json:"ImagePath,omitempty"`
+ FileSystemName string `json:"FileSystemName,omitempty"`
+ VolumeGuid string `json:"VolumeGuid,omitempty"`
+ MountFlags uint32 `json:"MountFlags,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go
index b4f9c315b..bb36777b8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go
@@ -9,6 +9,8 @@
package hcsschema
+import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
+
type CloseHandle struct {
- Handle string `json:"Handle,omitempty"`
+ Handle guestrequest.STDIOHandle `json:"Handle,omitempty"` // NOTE: Swagger generated as string. Locally updated.
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go
index 68aa04a57..347da50e8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go
@@ -9,8 +9,11 @@
package hcsschema
+// NOTE: Swagger generated fields as int32. Locally updated to uint16 to match documentation.
+// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ConsoleSize
+
type ConsoleSize struct {
- Height int32 `json:"Height,omitempty"`
+ Height uint16 `json:"Height,omitempty"`
- Width int32 `json:"Width,omitempty"`
+ Width uint16 `json:"Width,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
index 4fb231076..39a54432c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
@@ -31,4 +31,6 @@ type Container struct {
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
AssignedDevices []Device `json:"AssignedDevices,omitempty"`
+
+ AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
index f1a28cd38..0be0475d4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
@@ -14,5 +14,5 @@ type CpuGroupConfig struct {
Affinity *CpuGroupAffinity `json:"Affinity,omitempty"`
GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"`
// Hypervisor CPU group IDs exposed to clients
- HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"`
+ HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go
index bbad6a2c4..31fe07c3a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go
@@ -9,6 +9,14 @@
package hcsschema
+type CPUGroupPropertyCode uint32
+
+const (
+ CPUCapacityProperty = 0x00010000
+ CPUSchedulingPriorityProperty = 0x00020000
+ IdleLPReserveProperty = 0x00030000
+)
+
type CpuGroupProperty struct {
PropertyCode uint32 `json:"PropertyCode,omitempty"`
PropertyValue uint32 `json:"PropertyValue,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go
new file mode 100644
index 000000000..5385850fe
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go
@@ -0,0 +1,22 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DebugOptions struct {
+ // BugcheckSavedStateFileName is the path for the file in which the guest VM state will be saved when
+ // the guest crashes.
+ BugcheckSavedStateFileName string `json:"BugcheckSavedStateFileName,omitempty"`
+ // BugcheckNoCrashdumpSavedStateFileName is the path of the file in which the guest VM state will be
+ // saved when the guest crashes but the guest isn't able to generate the crash dump. This usually
+ // happens in early boot failures.
+ BugcheckNoCrashdumpSavedStateFileName string `json:"BugcheckNoCrashdumpSavedStateFileName,omitempty"`
+ TripleFaultSavedStateFileName string `json:"TripleFaultSavedStateFileName,omitempty"`
+ FirmwareDumpFileName string `json:"FirmwareDumpFileName,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
index 107caddad..31c4538af 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
@@ -12,9 +12,9 @@ package hcsschema
type DeviceType string
const (
- ClassGUID DeviceType = "ClassGuid"
- DeviceInstance DeviceType = "DeviceInstance"
- GPUMirror DeviceType = "GpuMirror"
+ ClassGUID DeviceType = "ClassGuid"
+ DeviceInstanceID DeviceType = "DeviceInstance"
+ GPUMirror DeviceType = "GpuMirror"
)
type Device struct {
@@ -22,6 +22,6 @@ type Device struct {
Type DeviceType `json:"Type,omitempty"`
// The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid.
InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
- // The location path of the device to assign to the container. Only used when Type is DeviceInstance.
+ // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID.
LocationPath string `json:"LocationPath,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go
index ef1eec886..a48a65394 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go
@@ -14,6 +14,9 @@ type GuestState struct {
// The path to an existing file uses for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state.
GuestStateFilePath string `json:"GuestStateFilePath,omitempty"`
+	// The guest state file type, which is affected by the guest isolation mode: either a file or block storage.
+ GuestStateFileType string `json:"GuestStateFileType,omitempty"`
+
// The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state.
RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go
new file mode 100644
index 000000000..a34c2f99a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go
@@ -0,0 +1,21 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type IsolationSettings struct {
+ // Guest isolation type options to decide virtual trust levels of virtual machine
+ IsolationType string `json:"IsolationType,omitempty"`
+	// Configuration to debug the HCL layer for the HCS VM. TODO: Task 31102306: missing a way to prevent the exposure of private debug configuration in HCS. TODO: think about the secret configurations which are private in the VMMS VM (only editable by hvsedit).
+ DebugHost string `json:"DebugHost,omitempty"`
+ DebugPort int64 `json:"DebugPort,omitempty"`
+ // Optional data passed by host on isolated virtual machine start
+ LaunchData string `json:"LaunchData,omitempty"`
+ HclEnabled *bool `json:"HclEnabled,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go
index 176c49d49..cb8dea08d 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go
@@ -9,6 +9,13 @@
package hcsschema
+type FileSystemFilterType string
+
+const (
+ UnionFS FileSystemFilterType = "UnionFS"
+ WCIFS FileSystemFilterType = "WCIFS"
+)
+
type Layer struct {
Id string `json:"Id,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
new file mode 100644
index 000000000..8dbe40b3b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
@@ -0,0 +1,14 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ContainerDefinitionDevice struct {
+ DeviceExtension []DeviceExtension `json:"device_extension,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
new file mode 100644
index 000000000..8fe89f927
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceCategory struct {
+ Name string `json:"name,omitempty"`
+ InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
new file mode 100644
index 000000000..a62568d89
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceExtension struct {
+ DeviceCategory *DeviceCategory `json:"device_category,omitempty"`
+ Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
new file mode 100644
index 000000000..a7410febd
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
@@ -0,0 +1,17 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceInstance struct {
+ Id string `json:"id,omitempty"`
+ LocationPath string `json:"location_path,omitempty"`
+ PortName string `json:"port_name,omitempty"`
+ InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
new file mode 100644
index 000000000..355364064
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceNamespace struct {
+ RequiresDriverstore bool `json:"requires_driverstore,omitempty"`
+ DeviceCategory []DeviceCategory `json:"device_category,omitempty"`
+ DeviceInstance []DeviceInstance `json:"device_instance,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
new file mode 100644
index 000000000..7be98b541
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type InterfaceClass struct {
+ Type_ string `json:"type,omitempty"`
+ Identifier string `json:"identifier,omitempty"`
+ Recurse bool `json:"recurse,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
new file mode 100644
index 000000000..3ab9cf1ec
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceExtensionNamespace struct {
+ Ob *ObjectNamespace `json:"ob,omitempty"`
+ Device *DeviceNamespace `json:"device,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
new file mode 100644
index 000000000..d2f51b3b5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
@@ -0,0 +1,18 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectDirectory struct {
+ Name string `json:"name,omitempty"`
+ Clonesd string `json:"clonesd,omitempty"`
+ Shadow string `json:"shadow,omitempty"`
+ Symlink []ObjectSymlink `json:"symlink,omitempty"`
+ Objdir []ObjectDirectory `json:"objdir,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
new file mode 100644
index 000000000..47dfb55bf
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectNamespace struct {
+ Shadow string `json:"shadow,omitempty"`
+ Symlink []ObjectSymlink `json:"symlink,omitempty"`
+ Objdir []ObjectDirectory `json:"objdir,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
new file mode 100644
index 000000000..8867ebe5f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
@@ -0,0 +1,18 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectSymlink struct {
+ Name string `json:"name,omitempty"`
+ Path string `json:"path,omitempty"`
+ Scope string `json:"scope,omitempty"`
+ Pathtoclone string `json:"pathtoclone,omitempty"`
+ AccessMask int32 `json:"access_mask,omitempty"`
+}
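
The three object-namespace types above nest: an ObjectNamespace carries ObjectDirectory entries, and each directory can carry ObjectSymlink entries plus further nested directories. A minimal sketch of that shape; the directory and symlink values are hypothetical and not taken from this diff:

    ns := hcsschema.ObjectNamespace{
        Objdir: []hcsschema.ObjectDirectory{{
            Name: "GLOBAL??", // hypothetical object directory name
            Symlink: []hcsschema.ObjectSymlink{{
                Name: "C:",
                Path: `\Device\HarddiskVolume1`, // hypothetical link target
            }},
        }},
    }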
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go
index d29455a3e..6364da8e2 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go
@@ -9,10 +9,12 @@
package hcsschema
+import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
+
type ModifySettingRequest struct {
ResourcePath string `json:"ResourcePath,omitempty"`
- RequestType string `json:"RequestType,omitempty"`
+ RequestType guestrequest.RequestType `json:"RequestType,omitempty"` // NOTE: Swagger generated as string. Locally updated.
Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated
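
With RequestType now typed, callers build modification requests from guestrequest constants rather than raw strings. A hedged sketch; the RequestTypeUpdate constant and the resource path are assumptions, not shown in this diff:

    req := hcsschema.ModifySettingRequest{
        ResourcePath: "VirtualMachine/ComputeTopology/Memory/SizeInMB", // hypothetical resource path
        RequestType:  guestrequest.RequestTypeUpdate,                   // assumed constant in the guestrequest package
        Settings:     8192,
    }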
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go
index e4ed095c7..862b7911e 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go
@@ -9,9 +9,11 @@
package hcsschema
-// Passed to HcsRpc_ModifyProcess
+import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
+
+// Passed to HcsRpc_ModifyProcess
type ProcessModifyRequest struct {
- Operation string `json:"Operation,omitempty"`
+ Operation guestrequest.ProcessModifyOperation `json:"Operation,omitempty"` // NOTE: Swagger generated as string. Locally updated.
ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go
index ad9a4fa9a..3c371d465 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go
@@ -9,13 +9,16 @@
package hcsschema
-// Status of a process running in a container
+// NOTE: Swagger generated fields as int32. Locally updated to uint32 to match documentation.
+// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ProcessStatus
+
+// Status of a process running in a container
type ProcessStatus struct {
- ProcessId int32 `json:"ProcessId,omitempty"`
+ ProcessId uint32 `json:"ProcessId,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation.
Exited bool `json:"Exited,omitempty"`
- ExitCode int32 `json:"ExitCode,omitempty"`
+ ExitCode uint32 `json:"ExitCode,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation.
LastWaitResult int32 `json:"LastWaitResult,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go
index 17558cba0..0c7efe8d4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go
@@ -10,7 +10,7 @@
package hcsschema
import (
- v1 "github.com/containerd/cgroups/stats/v1"
+ v1 "github.com/containerd/cgroups/v3/cgroup1/stats"
)
type Properties struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go
new file mode 100644
index 000000000..e7b605fda
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go
@@ -0,0 +1,13 @@
+package hcsschema
+
+// NOTE: manually added
+
+type RegistryHive string
+
+// List of RegistryHive
+const (
+ RegistryHive_SYSTEM RegistryHive = "System"
+ RegistryHive_SOFTWARE RegistryHive = "Software"
+ RegistryHive_SECURITY RegistryHive = "Security"
+ RegistryHive_SAM RegistryHive = "Sam"
+)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go
index 26fde99c7..1883444a5 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go
@@ -10,7 +10,7 @@
package hcsschema
type RegistryKey struct {
- Hive string `json:"Hive,omitempty"`
+ Hive RegistryHive `json:"Hive,omitempty"`
Name string `json:"Name,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go
index 3f203176c..13f24d536 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go
@@ -14,7 +14,7 @@ type RegistryValue struct {
Name string `json:"Name,omitempty"`
- Type_ string `json:"Type,omitempty"`
+ Type_ RegistryValueType `json:"Type,omitempty"`
// One and only one value type must be set.
StringValue string `json:"StringValue,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go
new file mode 100644
index 000000000..c8b4f6c95
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go
@@ -0,0 +1,17 @@
+package hcsschema
+
+// NOTE: manually added
+
+type RegistryValueType string
+
+// List of RegistryValueType
+const (
+ RegistryValueType_NONE RegistryValueType = "None"
+ RegistryValueType_STRING RegistryValueType = "String"
+ RegistryValueType_EXPANDED_STRING RegistryValueType = "ExpandedString"
+ RegistryValueType_MULTI_STRING RegistryValueType = "MultiString"
+ RegistryValueType_BINARY RegistryValueType = "Binary"
+ RegistryValueType_D_WORD RegistryValueType = "DWord"
+ RegistryValueType_Q_WORD RegistryValueType = "QWord"
+ RegistryValueType_CUSTOM_TYPE RegistryValueType = "CustomType"
+)
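
Together with the RegistryHive type above, these constants let registry edits be expressed with typed enums instead of bare strings. A minimal sketch using only fields visible in this diff; the key and value names are illustrative:

    key := hcsschema.RegistryKey{
        Hive: hcsschema.RegistryHive_SOFTWARE, // typed, instead of the old raw "Software"
        Name: `Microsoft\Example`,             // hypothetical key path
    }
    value := hcsschema.RegistryValue{
        Name:        "Example",                          // hypothetical value name
        Type_:       hcsschema.RegistryValueType_STRING, // typed, instead of the old raw "String"
        StringValue: "hello",
    }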
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go
new file mode 100644
index 000000000..14f0299e3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type SecuritySettings struct {
+ // Enablement of Trusted Platform Module on the computer system
+ EnableTpm bool `json:"EnableTpm,omitempty"`
+ Isolation *IsolationSettings `json:"Isolation,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go
new file mode 100644
index 000000000..72de80149
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go
@@ -0,0 +1,28 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type SystemTime struct {
+ Year int32 `json:"Year,omitempty"`
+
+ Month int32 `json:"Month,omitempty"`
+
+ DayOfWeek int32 `json:"DayOfWeek,omitempty"`
+
+ Day int32 `json:"Day,omitempty"`
+
+ Hour int32 `json:"Hour,omitempty"`
+
+ Minute int32 `json:"Minute,omitempty"`
+
+ Second int32 `json:"Second,omitempty"`
+
+ Milliseconds int32 `json:"Milliseconds,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go
new file mode 100644
index 000000000..529743d75
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go
@@ -0,0 +1,26 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type TimeZoneInformation struct {
+ Bias int32 `json:"Bias,omitempty"`
+
+ StandardName string `json:"StandardName,omitempty"`
+
+ StandardDate *SystemTime `json:"StandardDate,omitempty"`
+
+ StandardBias int32 `json:"StandardBias,omitempty"`
+
+ DaylightName string `json:"DaylightName,omitempty"`
+
+ DaylightDate *SystemTime `json:"DaylightDate,omitempty"`
+
+ DaylightBias int32 `json:"DaylightBias,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go
index 0e48ece50..9228923fe 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go
@@ -12,6 +12,8 @@ package hcsschema
type Uefi struct {
EnableDebugger bool `json:"EnableDebugger,omitempty"`
+ ApplySecureBootTemplate string `json:"ApplySecureBootTemplate,omitempty"`
+
SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"`
BootThis *UefiBootEntry `json:"BootThis,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go
index 2d22b1bcb..1e0fab289 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go
@@ -29,4 +29,8 @@ type VirtualMachine struct {
StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
+
+ SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
+
+ DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
}
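
The new SecuritySettings hook is enough to request a TPM for a guest. A sketch, assuming the rest of the VirtualMachine document is filled in elsewhere; DebugOptions fields are not shown in this diff, so they are left unset:

    vm := hcsschema.VirtualMachine{
        SecuritySettings: &hcsschema.SecuritySettings{
            EnableTpm: true, // enable the guest's Trusted Platform Module
        },
        // DebugOptions intentionally left nil; its fields are not part of this diff.
    }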
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
new file mode 100644
index 000000000..9ef322f61
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualPMemMapping struct {
+ HostPath string `json:"HostPath,omitempty"`
+ ImageFormat string `json:"ImageFormat,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go
index a634dfc15..a46b0051d 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcs
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 75499c967..81d60ed43 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -1,20 +1,27 @@
+//go:build windows
+
package hcs
import (
"context"
"encoding/json"
"errors"
+ "fmt"
"strings"
"sync"
"syscall"
+ "time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/hcs/schema1"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+ "github.com/Microsoft/hcsshim/internal/jobobject"
"github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/vmcompute"
+ "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -28,9 +35,13 @@ type System struct {
waitBlock chan struct{}
waitError error
exitError error
- os, typ string
+ os, typ, owner string
+ startTime time.Time
}
+var _ cow.Container = &System{}
+var _ cow.ProcessHost = &System{}
+
func newSystem(id string) *System {
return &System{
id: id,
@@ -38,13 +49,18 @@ func newSystem(id string) *System {
}
}
+// Implementation detail for silo naming; this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+ return fmt.Sprintf(`\Container_%s`, containerID)
+}
+
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcs::CreateComputeSystem"
// hcsCreateComputeSystemContext is an async operation. Start the outer span
// here to measure the full create time.
- ctx, span := trace.StartSpan(ctx, operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", id))
@@ -78,9 +94,10 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in
}
}
- events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
+ events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber,
+ hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
if err != nil {
- if err == ErrTimeout {
+ if errors.Is(err, ErrTimeout) {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
_ = computeSystem.Terminate(ctx)
@@ -127,6 +144,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
+ computeSystem.owner = strings.ToLower(props.Owner)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
@@ -178,7 +196,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
// hcsStartComputeSystemContext is an async operation. Start the outer span
// here to measure the full start time.
- ctx, span := trace.StartSpan(ctx, operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -186,16 +204,19 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
+ // Prevent starting an exited system: we do not recreate waitBlock or rerun
+ // waitBackground, so we have no way to be notified of it closing again.
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
}
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
- events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
+ events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber,
+ hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
-
+ computeSystem.startTime = time.Now()
return nil
}
@@ -211,13 +232,13 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {
operation := "hcs::System::Shutdown"
- if computeSystem.handle == 0 {
+ if computeSystem.handle == 0 || computeSystem.stopped() {
return nil
}
resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
- switch err {
+ switch err { //nolint:errorlint
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, err, events)
@@ -232,13 +253,13 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
operation := "hcs::System::Terminate"
- if computeSystem.handle == 0 {
+ if computeSystem.handle == 0 || computeSystem.stopped() {
return nil
}
resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
- switch err {
+ switch err { //nolint:errorlint
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, err, events)
@@ -253,12 +274,12 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
// safe to call multiple times.
func (computeSystem *System) waitBackground() {
operation := "hcs::System::waitBackground"
- ctx, span := trace.StartSpan(context.Background(), operation)
+ ctx, span := oc.StartSpan(context.Background(), operation)
defer span.End()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
- switch err {
+ switch err { //nolint:errorlint
case nil:
log.G(ctx).Debug("system exited")
case ErrVmcomputeUnexpectedExit:
@@ -275,24 +296,51 @@ func (computeSystem *System) waitBackground() {
oc.SetSpanStatus(span, err)
}
-// Wait synchronously waits for the compute system to shutdown or terminate. If
-// the compute system has already exited returns the previous error (if any).
-func (computeSystem *System) Wait() error {
- <-computeSystem.waitBlock
+func (computeSystem *System) WaitChannel() <-chan struct{} {
+ return computeSystem.waitBlock
+}
+
+func (computeSystem *System) WaitError() error {
return computeSystem.waitError
}
-// ExitError returns an error describing the reason the compute system terminated.
-func (computeSystem *System) ExitError() error {
+// Wait synchronously waits for the compute system to shutdown or terminate.
+// If the compute system has already exited returns the previous error (if any).
+func (computeSystem *System) Wait() error {
+ return computeSystem.WaitCtx(context.Background())
+}
+
+// WaitCtx synchronously waits for the compute system to shutdown or terminate, or the context to be cancelled.
+//
+// See [System.Wait] for more information.
+func (computeSystem *System) WaitCtx(ctx context.Context) error {
+ select {
+ case <-computeSystem.WaitChannel():
+ return computeSystem.WaitError()
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
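
Caller-side, the split into WaitChannel/WaitError and the new WaitCtx make bounded waits straightforward. A sketch, assuming sys is a started *hcs.System:

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    if err := sys.WaitCtx(ctx); err != nil {
        // Returns ctx.Err() on timeout/cancellation, otherwise the system's wait error.
        return err
    }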
+
+// stopped returns true if the compute system stopped.
+func (computeSystem *System) stopped() bool {
select {
case <-computeSystem.waitBlock:
- if computeSystem.waitError != nil {
- return computeSystem.waitError
- }
- return computeSystem.exitError
+ return true
default:
+ }
+ return false
+}
+
+// ExitError returns an error describing the reason the compute system terminated.
+func (computeSystem *System) ExitError() error {
+ if !computeSystem.stopped() {
return errors.New("container not exited")
}
+ if computeSystem.waitError != nil {
+ return computeSystem.waitError
+ }
+ return computeSystem.exitError
}
// Properties returns the requested container properties targeting a V1 schema container.
@@ -302,6 +350,10 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
operation := "hcs::System::Properties"
+ if computeSystem.handle == 0 {
+ return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+ }
+
queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
if err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
@@ -324,13 +376,125 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
return properties, nil
}
-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
- computeSystem.handleLock.RLock()
- defer computeSystem.handleLock.RUnlock()
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// failed to be queried, they will be tallied up and returned as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the container's job object
+// cannot be opened.
+func (computeSystem *System) queryInProc(
+ ctx context.Context,
+ props *hcsschema.Properties,
+ types []hcsschema.PropertyType,
+) ([]hcsschema.PropertyType, error) {
+ // In the future we can make use of some new functionality in the HCS that allows you
+ // to pass a job object for HCS to use for the container. Currently, the only way we'll
+ // be able to open the job/silo is if we're running as SYSTEM.
+ jobOptions := &jobobject.Options{
+ UseNTVariant: true,
+ Name: siloNameFmt(computeSystem.id),
+ }
+ job, err := jobobject.Open(ctx, jobOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer job.Close()
+
+ var fallbackQueryTypes []hcsschema.PropertyType
+ for _, propType := range types {
+ switch propType {
+ case hcsschema.PTStatistics:
+ // Handle a bad caller asking for the same type twice. No use in re-querying if this is
+ // filled in already.
+ if props.Statistics == nil {
+ props.Statistics, err = computeSystem.statisticsInProc(job)
+ if err != nil {
+ log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+ default:
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+
+ return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+ // Start timestamp for these stats before we grab them to match HCS
+ timestamp := time.Now()
+
+ memInfo, err := job.QueryMemoryStats()
+ if err != nil {
+ return nil, err
+ }
+
+ processorInfo, err := job.QueryProcessorStats()
+ if err != nil {
+ return nil, err
+ }
+
+ storageInfo, err := job.QueryStorageStats()
+ if err != nil {
+ return nil, err
+ }
+ // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+ // with the class SystemProcessInformation which returns an array containing system information for *every*
+ // process running on the machine. They then grab the pids that are running in the container and filter down
+ // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+ // work well, as performance degrades when more processes are running on the machine in general and not
+ // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+ // as well which isn't great and is wasted work to fetch.
+ //
+ // HCS only lets you grab statistics in an all-or-nothing fashion, so we can't just grab the private
+ // working set ourselves and ask for everything else separately. The optimization we can make here is
+ // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+ // the private working set in a more efficient manner by:
+ //
+ // 1. Find the pids running in the silo
+ // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+ // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+ // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+ privateWorkingSet, err := job.QueryPrivateWorkingSet()
+ if err != nil {
+ return nil, err
+ }
+
+ return &hcsschema.Statistics{
+ Timestamp: timestamp,
+ ContainerStartTime: computeSystem.startTime,
+ Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+ Memory: &hcsschema.MemoryStats{
+ MemoryUsageCommitBytes: memInfo.JobMemory,
+ MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed,
+ MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+ },
+ Processor: &hcsschema.ProcessorStats{
+ RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+ RuntimeUser100ns: uint64(processorInfo.TotalUserTime),
+ TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+ },
+ Storage: &hcsschema.StorageStats{
+ ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount),
+ ReadSizeBytes: storageInfo.ReadStats.TotalSize,
+ WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+ WriteSizeBytes: storageInfo.WriteStats.TotalSize,
+ },
+ }, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
operation := "hcs::System::PropertiesV2"
+ if computeSystem.handle == 0 {
+ return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+ }
+
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
if err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
@@ -345,21 +509,75 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
- properties := &hcsschema.Properties{}
- if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+ props := &hcsschema.Properties{}
+ if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
}
- return properties, nil
+ return props, nil
+}
+
+// PropertiesV2 returns the requested compute system's properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+ computeSystem.handleLock.RLock()
+ defer computeSystem.handleLock.RUnlock()
+
+ // Let HCS tally up the total for VM based queries instead of querying ourselves.
+ if computeSystem.typ != "container" {
+ return computeSystem.hcsPropertiesV2Query(ctx, types)
+ }
+
+ // Define a starter Properties struct with the default fields returned from every
+ // query. Owner is only returned from Statistics but it's harmless to include.
+ properties := &hcsschema.Properties{
+ Id: computeSystem.id,
+ SystemType: computeSystem.typ,
+ RuntimeOsType: computeSystem.os,
+ Owner: computeSystem.owner,
+ }
+
+ logEntry := log.G(ctx)
+ // First, let's try to query ourselves without reaching out to HCS. If any of the queries fail,
+ // we'll take note and fall back to querying HCS for any of the failed types.
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+ if err == nil && len(fallbackTypes) == 0 {
+ return properties, nil
+ } else if err != nil {
+ logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+ fallbackTypes = types
+ }
+
+ logEntry.WithFields(logrus.Fields{
+ logfields.ContainerID: computeSystem.id,
+ "propertyTypes": fallbackTypes,
+ }).Info("falling back to HCS for property type queries")
+
+ hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now add in anything that we might have successfully queried in process.
+ if properties.Statistics != nil {
+ hcsProperties.Statistics = properties.Statistics
+ hcsProperties.Owner = properties.Owner
+ }
+
+ // For future support for querying processlist in-proc as well.
+ if properties.ProcessList != nil {
+ hcsProperties.ProcessList = properties.ProcessList
+ }
+
+ return hcsProperties, nil
}
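
For container queries this means a statistics request now tries the in-proc job-object path first and only falls back to HCS for the property types that failed. A usage sketch, assuming sys is a *hcs.System for a running container:

    props, err := sys.PropertiesV2(ctx, hcsschema.PTStatistics)
    if err != nil {
        return err
    }
    if props.Statistics != nil && props.Statistics.Memory != nil {
        fmt.Println(props.Statistics.Memory.MemoryUsagePrivateWorkingSetBytes)
    }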
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause(ctx context.Context) (err error) {
operation := "hcs::System::Pause"
- // hcsPauseComputeSystemContext is an async peration. Start the outer span
+ // hcsPauseComputeSystemContext is an async operation. Start the outer span
// here to measure the full pause time.
- ctx, span := trace.StartSpan(ctx, operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -372,7 +590,8 @@ func (computeSystem *System) Pause(ctx context.Context) (err error) {
}
resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
- events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
+ events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber,
+ hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
@@ -386,7 +605,7 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) {
// hcsResumeComputeSystemContext is an async operation. Start the outer span
// here to measure the full restore time.
- ctx, span := trace.StartSpan(ctx, operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -399,7 +618,8 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) {
}
resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
- events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
+ events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber,
+ hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
@@ -411,9 +631,9 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) {
func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) {
operation := "hcs::System::Save"
- // hcsSaveComputeSystemContext is an async peration. Start the outer span
+ // hcsSaveComputeSystemContext is an async operation. Start the outer span
// here to measure the full save time.
- ctx, span := trace.StartSpan(ctx, operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -431,7 +651,8 @@ func (computeSystem *System) Save(ctx context.Context, options interface{}) (err
}
result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions))
- events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave)
+ events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber,
+ hcsNotificationSystemSaveCompleted, &timeout.SystemSave)
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
@@ -456,6 +677,11 @@ func (computeSystem *System) createProcess(ctx context.Context, operation string
processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
events := processHcsResult(ctx, resultJSON)
if err != nil {
+ if v2, ok := c.(*hcsschema.ProcessParameters); ok {
+ operation += ": " + v2.CommandLine
+ } else if v1, ok := c.(*schema1.ProcessConfig); ok {
+ operation += ": " + v1.CommandLine
+ }
return nil, nil, makeSystemError(computeSystem, operation, err, events)
}
@@ -520,9 +746,17 @@ func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
-func (computeSystem *System) Close() (err error) {
+func (computeSystem *System) Close() error {
+ return computeSystem.CloseCtx(context.Background())
+}
+
+// CloseCtx is similar to [System.Close], but accepts a context.
+//
+// The context is used for all operations, including waits, so timeouts/cancellations may prevent
+// proper system cleanup.
+func (computeSystem *System) CloseCtx(ctx context.Context) (err error) {
operation := "hcs::System::Close"
- ctx, span := trace.StartSpan(context.Background(), operation)
+ ctx, span := oc.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -565,7 +799,8 @@ func (computeSystem *System) registerCallback(ctx context.Context) error {
callbackMap[callbackNumber] = callbackContext
callbackMapLock.Unlock()
- callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
+ callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle,
+ notificationWatcherCallback, callbackNumber)
if err != nil {
return err
}
@@ -592,7 +827,7 @@ func (computeSystem *System) unregisterCallback(ctx context.Context) error {
return nil
}
- // hcsUnregisterComputeSystemCallback has its own syncronization
+ // hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
index 3342e5bb9..76eb2be7c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcs
import (
@@ -12,14 +14,14 @@ import (
"golang.org/x/sys/windows"
)
-// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
+// makeOpenFiles calls winio.NewOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
fs := make([]io.ReadWriteCloser, len(hs))
for i, h := range hs {
if h != syscall.Handle(0) {
if err == nil {
- fs[i], err = winio.MakeOpenFile(h)
+ fs[i], err = winio.NewOpenFile(windows.Handle(h))
}
if err != nil {
syscall.Close(h)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
index db4e14fdf..3a51ed195 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcs
import (
@@ -7,7 +9,14 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
)
-func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
+func processAsyncHcsResult(
+ ctx context.Context,
+ err error,
+ resultJSON string,
+ callbackNumber uintptr,
+ expectedNotification hcsNotification,
+ timeout *time.Duration,
+) ([]ErrorEvent, error) {
events := processHcsResult(ctx, resultJSON)
if IsPending(err) {
return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout)
@@ -16,7 +25,12 @@ func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, ca
return events, err
}
-func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
+func waitForNotification(
+ ctx context.Context,
+ callbackNumber uintptr,
+ expectedNotification hcsNotification,
+ timeout *time.Duration,
+) error {
callbackMapLock.RLock()
if _, ok := callbackMap[callbackNumber]; !ok {
callbackMapLock.RUnlock()
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go
new file mode 100644
index 000000000..ce7067678
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go
@@ -0,0 +1 @@
+package hcserror
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
index 921c2c855..a70d80da0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
@@ -1,11 +1,13 @@
+//go:build windows
+
package hcserror
import (
+ "errors"
"fmt"
- "syscall"
-)
-const ERROR_GEN_FAILURE = syscall.Errno(31)
+ "golang.org/x/sys/windows"
+)
type HcsError struct {
title string
@@ -30,18 +32,21 @@ func (e *HcsError) Error() string {
func New(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
- if _, ok := err.(*syscall.DLLError); ok {
+ var e *windows.DLLError
+ if errors.As(err, &e) {
return err
}
return &HcsError{title, rest, err}
}
func Win32FromError(err error) uint32 {
- if herr, ok := err.(*HcsError); ok {
+ var herr *HcsError
+ if errors.As(err, &herr) {
return Win32FromError(herr.Err)
}
- if code, ok := err.(syscall.Errno); ok {
+ var code windows.Errno
+ if errors.As(err, &code) {
return uint32(code)
}
- return uint32(ERROR_GEN_FAILURE)
+ return uint32(windows.ERROR_GEN_FAILURE)
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go
new file mode 100644
index 000000000..f6d35df0e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go
@@ -0,0 +1 @@
+package hns
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go
index b2e475f53..ec4c907d1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go
@@ -2,7 +2,7 @@ package hns
import "fmt"
-//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hns.go
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go
new file mode 100644
index 000000000..82ca5baef
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go
@@ -0,0 +1,60 @@
+//go:build windows
+
+package hns
+
+import (
+ "encoding/json"
+
+ "github.com/sirupsen/logrus"
+)
+
+// HNSNnvManagementMacAddress represents a management MAC address
+// that needs to be excluded from VF reassignment
+type HNSNnvManagementMacAddress struct {
+ MacAddress string `json:",omitempty"`
+}
+
+// HNSNnvManagementMacList represents a list of management
+// MAC addresses to exclude from VF reassignment
+type HNSNnvManagementMacList struct {
+ MacAddressList []HNSNnvManagementMacAddress `json:",omitempty"`
+}
+
+// HNSNnvManagementMacRequest makes an HNS call to modify/query the NnvManagementMacList
+func HNSNnvManagementMacRequest(method, path, request string) (*HNSNnvManagementMacList, error) {
+ nnvManagementMacList := &HNSNnvManagementMacList{}
+ err := hnsCall(method, "/accelnet/"+path, request, &nnvManagementMacList)
+ if err != nil {
+ return nil, err
+ }
+ return nnvManagementMacList, nil
+}
+
+// Set ManagementMacAddressList by sending "POST" NnvManagementMacRequest to HNS.
+func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMacList, error) {
+ operation := "Set"
+ title := "hcsshim::nnvManagementMacList::" + operation
+ logrus.Debugf(title+" id=%s", nnvManagementMacList.MacAddressList)
+
+ jsonString, err := json.Marshal(nnvManagementMacList)
+ if err != nil {
+ return nil, err
+ }
+ return HNSNnvManagementMacRequest("POST", "", string(jsonString))
+}
+
+// Get ManagementMacAddressList by sending "GET" NnvManagementMacRequest to HNS.
+func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
+ operation := "Get"
+ title := "hcsshim::nnvManagementMacList::" + operation
+ logrus.Debugf(title)
+ return HNSNnvManagementMacRequest("GET", "", "")
+}
+
+// Delete ManagementMacAddressList by sending "DELETE" NnvManagementMacRequest to HNS.
+func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
+ operation := "Delete"
+ title := "hcsshim::nnvManagementMacList::" + operation
+ logrus.Debugf(title)
+ return HNSNnvManagementMacRequest("DELETE", "", "")
+}
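
A usage sketch for the accelnet helpers above; the MAC address is illustrative:

    list := &hns.HNSNnvManagementMacList{
        MacAddressList: []hns.HNSNnvManagementMacAddress{
            {MacAddress: "00-15-5D-00-00-01"}, // hypothetical management MAC
        },
    }
    if _, err := list.Set(); err != nil {
        return err
    }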
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
index b36315a39..a15609abd 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
import (
@@ -8,6 +10,28 @@ import (
"github.com/sirupsen/logrus"
)
+// EndpointState represents the states of an HNS Endpoint lifecycle.
+type EndpointState uint16
+
+// EndpointState const
+// The lifecycle of an Endpoint goes through created, attached, AttachedSharing (the endpoint is being shared
+// with other containers), detached (after being attached), degraded, and finally destroyed.
+// Note: this attribute is used by Calico to identify stale containers and depends on the HNS v1 API; if we move to the
+// HNS v2 API we will need to update the current Calico code and coordinate the change with Calico. Reach out to Microsoft to facilitate the change via HNS.
+const (
+ Uninitialized EndpointState = iota
+ Created EndpointState = 1
+ Attached EndpointState = 2
+ AttachedSharing EndpointState = 3
+ Detached EndpointState = 4
+ Degraded EndpointState = 5
+ Destroyed EndpointState = 6
+)
+
+func (es EndpointState) String() string {
+ return [...]string{"Uninitialized", "Created", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
+}
+
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:"ID,omitempty"`
@@ -20,6 +44,7 @@ type HNSEndpoint struct {
IPv6Address net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
+ DNSDomain string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
GatewayAddressV6 string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
@@ -30,9 +55,11 @@ type HNSEndpoint struct {
EnableLowMetric bool `json:",omitempty"`
Namespace *Namespace `json:",omitempty"`
EncapOverhead uint16 `json:",omitempty"`
+ SharedContainers []string `json:",omitempty"`
+ State EndpointState `json:",omitempty"`
}
-//SystemType represents the type of the system on which actions are done
+// SystemType represents the type of the system on which actions are done
type SystemType string
// SystemType const
@@ -57,6 +84,18 @@ type EndpointResquestResponse struct {
Error string
}
+// EndpointStats is the object that has stats for a given endpoint
+type EndpointStats struct {
+ BytesReceived uint64 `json:"BytesReceived"`
+ BytesSent uint64 `json:"BytesSent"`
+ DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"`
+ DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"`
+ EndpointID string `json:"EndpointId"`
+ InstanceID string `json:"InstanceId"`
+ PacketsReceived uint64 `json:"PacketsReceived"`
+ PacketsSent uint64 `json:"PacketsSent"`
+}
+
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
@@ -79,11 +118,27 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) {
return endpoint, nil
}
+// hnsEndpointStatsRequest makes an HNS call to query the stats for a given endpoint ID
+func hnsEndpointStatsRequest(id string) (*EndpointStats, error) {
+ var stats EndpointStats
+ err := hnsCall("GET", "/endpointstats/"+id, "", &stats)
+ if err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
// GetHNSEndpointByID get the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
return HNSEndpointRequest("GET", endpointID, "")
}
+// GetHNSEndpointStats gets the stats for an Endpoint by ID
+func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) {
+ return hnsEndpointStatsRequest(endpointID)
+}
+
// GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
hnsResponse, err := HNSListEndpointRequest()
@@ -116,7 +171,6 @@ func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) {
}
return false, nil
-
}
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
@@ -251,7 +305,6 @@ func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
-
}
// HostDetach detaches a nic on the host
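
A sketch of reading the new per-endpoint counters; endpointID is assumed to be a known HNS endpoint ID:

    stats, err := hns.GetHNSEndpointStats(endpointID)
    if err != nil {
        return err
    }
    fmt.Printf("rx=%d tx=%d bytes\n", stats.BytesReceived, stats.BytesSent)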
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
index 2df4a57f5..e61dc8de6 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
import (
@@ -29,7 +31,7 @@ func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) {
func hnsCall(method, path, request string, returnResponse interface{}) error {
hnsresponse, err := hnsCallRawResponse(method, path, request)
if err != nil {
- return fmt.Errorf("failed during hnsCallRawResponse: %v", err)
+ return fmt.Errorf("failed during hnsCallRawResponse: %w", err)
}
if !hnsresponse.Success {
return fmt.Errorf("hns failed with error : %s", hnsresponse.Error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
index a8d8cc56a..464bb8954 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
type HNSGlobals struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
index f12d3ab04..8861faee7 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
@@ -1,13 +1,16 @@
+//go:build windows
+
package hns
import (
"encoding/json"
"errors"
- "github.com/sirupsen/logrus"
"net"
+
+ "github.com/sirupsen/logrus"
)
-// Subnet is assoicated with a network and represents a list
+// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
@@ -15,7 +18,7 @@ type Subnet struct {
Policies []json.RawMessage `json:",omitempty"`
}
-// MacPool is assoicated with a network and represents a list
+// MacPool is associated with a network and represents a list
// of macaddresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
index 6765aaead..e97e4f631 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
@@ -21,10 +21,11 @@ const (
)
type NatPolicy struct {
- Type PolicyType `json:"Type"`
- Protocol string
- InternalPort uint16
- ExternalPort uint16
+ Type PolicyType `json:"Type"`
+ Protocol string `json:",omitempty"`
+ InternalPort uint16 `json:",omitempty"`
+ ExternalPort uint16 `json:",omitempty"`
+ ExternalPortReserved bool `json:",omitempty"`
}
type QosPolicy struct {
@@ -56,9 +57,10 @@ type PaPolicy struct {
type OutboundNatPolicy struct {
Policy
- VIP string `json:"VIP,omitempty"`
- Exceptions []string `json:"ExceptionList,omitempty"`
- Destinations []string `json:",omitempty"`
+ VIP string `json:"VIP,omitempty"`
+ Exceptions []string `json:"ExceptionList,omitempty"`
+ Destinations []string `json:",omitempty"`
+ MaxPortPoolUsage uint16 `json:",omitempty"`
}
type ProxyPolicy struct {
@@ -88,20 +90,20 @@ const (
type ACLPolicy struct {
Type PolicyType `json:"Type"`
Id string `json:"Id,omitempty"`
- Protocol uint16
- Protocols string `json:"Protocols,omitempty"`
- InternalPort uint16
+ Protocol uint16 `json:",omitempty"`
+ Protocols string `json:"Protocols,omitempty"`
+ InternalPort uint16 `json:",omitempty"`
Action ActionType
Direction DirectionType
- LocalAddresses string
- RemoteAddresses string
- LocalPorts string `json:"LocalPorts,omitempty"`
- LocalPort uint16
- RemotePorts string `json:"RemotePorts,omitempty"`
- RemotePort uint16
+ LocalAddresses string `json:",omitempty"`
+ RemoteAddresses string `json:",omitempty"`
+ LocalPorts string `json:"LocalPorts,omitempty"`
+ LocalPort uint16 `json:",omitempty"`
+ RemotePorts string `json:"RemotePorts,omitempty"`
+ RemotePort uint16 `json:",omitempty"`
RuleType RuleType `json:"RuleType,omitempty"`
- Priority uint16
- ServiceName string
+ Priority uint16 `json:",omitempty"`
+ ServiceName string `json:",omitempty"`
}
type Policy struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
index 31322a681..b98db40e8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
index d5efba7f2..b9c30b901 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
index d3b04eefe..a64b67923 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hns
import (
@@ -54,7 +56,7 @@ func issueNamespaceRequest(id *string, method, subpath string, request interface
if strings.Contains(err.Error(), "Element not found.") {
return nil, os.ErrNotExist
}
- return nil, fmt.Errorf("%s %s: %s", method, hnspath, err)
+ return nil, fmt.Errorf("%s %s: %w", method, hnspath, err)
}
return &ns, err
}
@@ -84,7 +86,7 @@ func GetNamespaceEndpoints(id string) ([]string, error) {
var endpoint namespaceEndpointRequest
err = json.Unmarshal(rsrc.Data, &endpoint)
if err != nil {
- return nil, fmt.Errorf("unmarshal endpoint: %s", err)
+ return nil, fmt.Errorf("unmarshal endpoint: %w", err)
}
endpoints = append(endpoints, endpoint.ID)
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
index 204633a48..11c7e97e3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package hns
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,13 +29,10 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
@@ -62,10 +62,11 @@ func _hnsCall(method string, path string, object string, response **uint16) (hr
}
func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) {
- if hr = procHNSCall.Find(); hr != nil {
+ hr = procHNSCall.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHNSCall.Addr(), uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go
new file mode 100644
index 000000000..cb554867f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go
@@ -0,0 +1 @@
+package interop
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
index 922f7c679..a56469656 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package interop
import (
@@ -5,7 +7,7 @@ import (
"unsafe"
)
-//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go interop.go
//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
index 12b0c71c5..14c750bd8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package interop
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,13 +29,10 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
@@ -43,6 +43,6 @@ var (
)
func coTaskMemFree(buffer unsafe.Pointer) {
- syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
+ syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(buffer))
return
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go
new file mode 100644
index 000000000..34b53d6e4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go
@@ -0,0 +1,8 @@
+// Package jobobject provides higher-level constructs for the Win32 job object API.
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
+// (CreateJobObject, AssignProcessToJobObject, etc.), as are most of the limit-information
+// structs and associated limit flags. Whatever the job object API needs that is not
+// present in golang.org/x/sys/windows lives in /internal/winapi.
+//
+// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
+package jobobject
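To make the wrapper's shape concrete, here is a hedged usage sketch of the API vendored in below (`Create`, `Assign`, `Pids`, `Close`); it assumes this internal package were importable, which it is not outside hcsshim:

```go
//go:build windows

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

func main() {
	// Create an anonymous job object and place the current process in it.
	job, err := jobobject.Create(context.Background(), &jobobject.Options{})
	if err != nil {
		panic(err)
	}
	defer job.Close()

	if err := job.Assign(uint32(os.Getpid())); err != nil {
		panic(err)
	}

	pids, err := job.Pids()
	if err != nil {
		panic(err)
	}
	fmt.Println("processes in job:", pids)
}
```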
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
new file mode 100644
index 000000000..eae3cc500
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
@@ -0,0 +1,114 @@
+//go:build windows
+
+package jobobject
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ioInitOnce sync.Once
+ initIOErr error
+ // Global iocp handle that will be re-used for every job object
+ ioCompletionPort windows.Handle
+ // Mapping of job handle to queue to place notifications in.
+ jobMap sync.Map
+)
+
+// MsgAllProcessesExited is a message indicating that every process in the job has exited.
+type MsgAllProcessesExited struct{}
+
+// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
+// This should not be treated as an error.
+type MsgUnimplemented struct{}
+
+// pollIOCP polls the io completion port forever.
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
+ var (
+ overlapped uintptr
+ code uint32
+ key uintptr
+ )
+
+ for {
+ err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to poll for job object message")
+ continue
+ }
+ if val, ok := jobMap.Load(key); ok {
+ msq, ok := val.(*queue.MessageQueue)
+ if !ok {
+				log.G(ctx).WithField("value", val).Warn("encountered non-queue type in job map")
+ continue
+ }
+ notification, err := parseMessage(code, overlapped)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("failed to parse job object message")
+ continue
+ }
+ if err := msq.Enqueue(notification); errors.Is(err, queue.ErrQueueClosed) {
+				// Enqueue only returns an error when the queue is closed.
+				// A queue is only ever closed when we call `Close` on the job it
+				// belongs to, which also removes it from jobMap, so something went
+				// wrong here. We can't return, as this loop reads messages for all
+				// jobs, so just log it and move on.
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("tried to write to a closed queue")
+ continue
+ }
+ } else {
+ log.G(ctx).Warn("received a message for a job not present in the mapping")
+ }
+ }
+}
+
+func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
+ // Check code and parse out relevant information related to that notification
+ // that we care about. For now all we handle is the message that all processes
+ // in the job have exited.
+ switch code {
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+ return MsgAllProcessesExited{}, nil
+	// The remaining messages are listed for completeness; if we fall into the
+	// default case instead, the code is one we don't know how to handle.
+ case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
+ case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
+ case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
+ default:
+ return nil, fmt.Errorf("unknown job notification type: %d", code)
+ }
+ return MsgUnimplemented{}, nil
+}
+
+// attachIOCP associates the job object with an IO completion port so we get
+// notified of events for the registered job.
+func attachIOCP(job windows.Handle, iocp windows.Handle) error {
+ info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
+ CompletionKey: job,
+ CompletionPort: iocp,
+ }
+ _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+ return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
new file mode 100644
index 000000000..b505731c3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
@@ -0,0 +1,666 @@
+//go:build windows
+
+package jobobject
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+// JobObject is a high level wrapper around a Windows job object. It holds a
+// handle to the job, a queue that receives IOCP notifications about the job's
+// lifecycle, and a mutex for synchronized handle access.
+type JobObject struct {
+ handle windows.Handle
+	// All accesses to this MUST be done atomically, except in `Open`, where the
+	// object is still being created. A value of 1 signifies that this job is
+	// currently a silo.
+ silo uint32
+ mq *queue.MessageQueue
+ handleLock sync.RWMutex
+}
+
+// JobLimits represents the resource constraints that can be applied to a job object.
+type JobLimits struct {
+ CPULimit uint32
+ CPUWeight uint32
+ MemoryLimitInBytes uint64
+ MaxIOPS int64
+ MaxBandwidth int64
+}
+
+type CPURateControlType uint32
+
+const (
+ WeightBased CPURateControlType = iota
+ RateBased
+)
+
+// Processor resource controls
+const (
+ cpuLimitMin = 1
+ cpuLimitMax = 10000
+ cpuWeightMin = 1
+ cpuWeightMax = 9
+)
+
+var (
+ ErrAlreadyClosed = errors.New("the handle has already been closed")
+ ErrNotRegistered = errors.New("job is not registered to receive notifications")
+ ErrNotSilo = errors.New("job is not a silo")
+)
+
+// Options represents the set of configurable options when making or opening a job object.
+type Options struct {
+ // `Name` specifies the name of the job object if a named job object is desired.
+ Name string
+ // `Notifications` specifies if the job will be registered to receive notifications.
+ // Defaults to false.
+ Notifications bool
+ // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
+ // Defaults to false.
+ UseNTVariant bool
+	// `Silo` specifies whether to promote the job to a silo. This additionally sets the
+	// flag JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, as it is required for the upgrade to complete.
+	Silo bool
+	// `EnableIOTracking` enables tracking I/O statistics on the job object. More
+	// specifically, this calls SetInformationJobObject with the JobObjectIoAttribution class.
+ EnableIOTracking bool
+}
+
+// Create creates a job object.
+//
+// If options.Name is an empty string, the job will not be assigned a name.
+//
+// If options.Notifications is false, `PollNotification` will return immediately with the error `ErrNotRegistered`.
+//
+// If `options` is nil, use default option values.
+//
+// Returns a JobObject structure and an error if there is one.
+func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil {
+ options = &Options{}
+ }
+
+ var jobName *winapi.UnicodeString
+ if options.Name != "" {
+ jobName, err = winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var jobHandle windows.Handle
+ if options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: jobName,
+ Attributes: 0,
+ }
+ status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ var jobNameBuf *uint16
+ if jobName != nil && jobName.Buffer != nil {
+ jobNameBuf = jobName.Buffer
+ }
+ jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ if options.EnableIOTracking {
+ if err := enableIOTracking(jobHandle); err != nil {
+ return nil, err
+ }
+ }
+
+ if options.Silo {
+ // This is a required setting for upgrading to a silo.
+ if err := job.SetTerminateOnLastHandleClose(); err != nil {
+ return nil, err
+ }
+ if err := job.PromoteToSilo(); err != nil {
+ return nil, err
+ }
+ }
+
+ return job, nil
+}
+
+// Open opens an existing job object with the name provided in `options`. If no
+// name is provided, an error is returned, as we need to know which job object to open.
+//
+// If options.Notifications is false, `PollNotification` will return immediately with the error `ErrNotRegistered`.
+//
+// Returns a JobObject structure and an error if there is one.
+func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil || options.Name == "" {
+ return nil, errors.New("no job object name specified to open")
+ }
+
+ unicodeJobName, err := winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ var jobHandle windows.Handle
+ if options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: unicodeJobName,
+ Attributes: 0,
+ }
+ status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ if isJobSilo(jobHandle) {
+ job.silo = 1
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ return job, nil
+}
+
+// setupNotifications is a helper to set up notifications when creating or opening a job object.
+func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInitOnce.Do(func() {
+ h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ initIOErr = err
+ return
+ }
+ ioCompletionPort = h
+ go pollIOCP(ctx, h)
+ })
+
+ if initIOErr != nil {
+ return nil, initIOErr
+ }
+
+ mq := queue.NewMessageQueue()
+ jobMap.Store(uintptr(job.handle), mq)
+ if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
+ jobMap.Delete(uintptr(job.handle))
+ return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
+ }
+ return mq, nil
+}
+
+// PollNotification polls for a job object notification. It should be called from
+// only one goroutine per job (ideally in a loop) and will block until a notification
+// is ready. It returns immediately with the error `ErrNotRegistered` if the job was
+// not registered to receive notifications during `Create`. Messages are queued
+// internally, so there is no concern of messages being dropped between polls.
+func (job *JobObject) PollNotification() (interface{}, error) {
+ if job.mq == nil {
+ return nil, ErrNotRegistered
+ }
+ return job.mq.Dequeue()
+}
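A sketch of the polling loop the doc comment describes, draining notifications from a single goroutine until all processes have exited (again assuming this internal package were importable):

```go
//go:build windows

package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

// waitForAllExited blocks until the job reports that every process has exited.
func waitForAllExited(job *jobobject.JobObject) error {
	for {
		msg, err := job.PollNotification()
		if err != nil {
			return err // ErrNotRegistered, or the queue was closed via job.Close()
		}
		if _, ok := msg.(jobobject.MsgAllProcessesExited); ok {
			fmt.Println("all processes in the job have exited")
			return nil
		}
		// MsgUnimplemented and other messages are not errors; keep polling.
	}
}

func main() {
	job, err := jobobject.Create(context.Background(), &jobobject.Options{Notifications: true})
	if err != nil {
		panic(err)
	}
	defer job.Close()
	_ = waitForAllExited(job)
}
```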
+
+// UpdateProcThreadAttribute updates the passed-in ProcThreadAttributeList with what is
+// necessary to launch a process in a job at creation time. This can be used to avoid
+// having to call Assign() after the process has already started running.
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := attrList.Update(
+ winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
+ unsafe.Pointer(&job.handle),
+ unsafe.Sizeof(job.handle),
+ ); err != nil {
+ return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
+ }
+
+ return nil
+}
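A hedged sketch of the create-time path this enables, using the attribute-list helpers from golang.org/x/sys/windows; the command line and startup-info plumbing here are illustrative, not part of this diff:

```go
//go:build windows

package example

import (
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/jobobject"
	"golang.org/x/sys/windows"
)

// startInJob launches cmd.exe directly inside the job, avoiding the window
// between process start and a later Assign() call.
func startInJob(job *jobobject.JobObject) error {
	attrs, err := windows.NewProcThreadAttributeList(1)
	if err != nil {
		return err
	}
	defer attrs.Delete()

	if err := job.UpdateProcThreadAttribute(attrs); err != nil {
		return err
	}

	si := &windows.StartupInfoEx{ProcThreadAttributeList: attrs.List()}
	si.Cb = uint32(unsafe.Sizeof(*si))
	pi := &windows.ProcessInformation{}
	cmd, err := windows.UTF16PtrFromString(`C:\Windows\System32\cmd.exe`)
	if err != nil {
		return err
	}
	return windows.CreateProcess(nil, cmd, nil, nil, false,
		windows.EXTENDED_STARTUPINFO_PRESENT, nil, nil, &si.StartupInfo, pi)
}
```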
+
+// Close closes the job object handle.
+func (job *JobObject) Close() error {
+ job.handleLock.Lock()
+ defer job.handleLock.Unlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := windows.Close(job.handle); err != nil {
+ return err
+ }
+
+ if job.mq != nil {
+ job.mq.Close()
+ }
+	// The handle is now invalid, so if the map entry used to receive notifications
+	// for this job still exists, remove it so we stop receiving notifications.
+ if _, ok := jobMap.Load(uintptr(job.handle)); ok {
+ jobMap.Delete(uintptr(job.handle))
+ }
+
+ job.handle = 0
+ return nil
+}
+
+// Assign assigns a process to the job object.
+func (job *JobObject) Assign(pid uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if pid == 0 {
+ return errors.New("invalid pid: 0")
+ }
+ hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
+ if err != nil {
+ return err
+ }
+ defer windows.Close(hProc)
+ return windows.AssignProcessToJobObject(job.handle, hProc)
+}
+
+// Terminate terminates the job; it essentially calls TerminateProcess on every
+// process in the job.
+func (job *JobObject) Terminate(exitCode uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ return windows.TerminateJobObject(job.handle, exitCode)
+}
+
+// Pids returns all of the process IDs in the job object.
+func (job *JobObject) Pids() ([]uint32, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
+ err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ )
+
+	// A nil error means the job has either one process or no processes; any other
+	// case results in ERROR_MORE_DATA. If info.NumberOfProcessIdsInList is 1, return
+	// that single PID, otherwise return an empty slice.
+ if err == nil {
+ if info.NumberOfProcessIdsInList == 1 {
+ return []uint32{uint32(info.ProcessIdList[0])}, nil
+ }
+		// Return an empty slice instead of nil to play well with callers, and do
+		// not return an error if no processes are running inside the job.
+ return []uint32{}, nil
+ }
+
+ if err != winapi.ERROR_MORE_DATA { //nolint:errorlint
+ return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
+ }
+
+ jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
+ buf := make([]byte, jobBasicProcessIDListSize)
+ if err = winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ unsafe.Pointer(&buf[0]),
+ uint32(len(buf)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
+ }
+
+ bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
+ pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
+ for i, bufPid := range bufInfo.AllPids() {
+ pids[i] = uint32(bufPid)
+ }
+ return pids, nil
+}
+
+// QueryMemoryStats gets the memory stats for the job object.
+func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectMemoryUsageInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryProcessorStats gets the processor stats for the job object.
+func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicAccountingInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryStorageStats gets the storage (I/O) stats for the job object. This call will
+// error if `EnableIOTracking` wasn't set to true on creation of the job and
+// SetIOTracking() hasn't been called since.
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+ }
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectIoAttribution,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
+ }
+ return &info, nil
+}
+
+// ApplyFileBinding makes a file binding using the Bind Filter from target to root. If
+// the job has not been upgraded to a silo, this call will fail. The binding is only
+// applied and visible for processes running in the job; processes on the host or in
+// another job will not be able to see the binding.
+func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if !job.isSilo() {
+ return ErrNotSilo
+ }
+
+	// The parent directory needs to exist for the bind to work. MkdirAll stats the
+	// path and returns nil if it already exists, so it is safe to call every time.
+ if err := os.MkdirAll(filepath.Dir(root), 0); err != nil {
+ return err
+ }
+
+ rootPtr, err := windows.UTF16PtrFromString(root)
+ if err != nil {
+ return err
+ }
+
+ targetPtr, err := windows.UTF16PtrFromString(target)
+ if err != nil {
+ return err
+ }
+
+ flags := winapi.BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING
+ if readOnly {
+ flags |= winapi.BINDFLT_FLAG_READ_ONLY_MAPPING
+ }
+
+ if err := winapi.BfSetupFilter(
+ job.handle,
+ flags,
+ rootPtr,
+ targetPtr,
+ nil,
+ 0,
+ ); err != nil {
+ return fmt.Errorf("failed to bind target %q to root %q for job object: %w", target, root, err)
+ }
+ return nil
+}
+
+// isJobSilo is a helper to determine if an opened job object is a silo. This should
+// ONLY be called from `Open`; any callers in this package afterwards should use `job.isSilo()`.
+func isJobSilo(h windows.Handle) bool {
+	// None of the information in the structure that this info class expects will be
+	// used; the call simply fails if the job hasn't been upgraded to a silo, which
+	// tells us, when opening a job, whether it is a silo or not. We still need to
+	// define the struct layout as Win32 expects it, else the struct alignment might
+	// differ and the call will fail.
+ var siloInfo winapi.SILOOBJECT_BASIC_INFORMATION
+ err := winapi.QueryInformationJobObject(
+ h,
+ winapi.JobObjectSiloBasicInformation,
+ unsafe.Pointer(&siloInfo),
+ uint32(unsafe.Sizeof(siloInfo)),
+ nil,
+ )
+ return err == nil
+}
+
+// PromoteToSilo promotes a job object to a silo. There must be no running processes
+// in the job for this to succeed. If the job is already a silo this is a no-op.
+func (job *JobObject) PromoteToSilo() error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if job.isSilo() {
+ return nil
+ }
+
+ pids, err := job.Pids()
+ if err != nil {
+ return err
+ }
+
+ if len(pids) != 0 {
+ return fmt.Errorf("job cannot have running processes to be promoted to a silo, found %d running processes", len(pids))
+ }
+
+ _, err = windows.SetInformationJobObject(
+ job.handle,
+ winapi.JobObjectCreateSilo,
+ 0,
+ 0,
+ )
+ if err != nil {
+ return fmt.Errorf("failed to promote job to silo: %w", err)
+ }
+
+ atomic.StoreUint32(&job.silo, 1)
+ return nil
+}
+
+// isSilo reports whether the job object is a silo.
+func (job *JobObject) isSilo() bool {
+ return atomic.LoadUint32(&job.silo) == 1
+}
+
+// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
+// private working set for every process running in the job.
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
+ pids, err := job.Pids()
+ if err != nil {
+ return 0, err
+ }
+
+ openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
+ h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
+ if err != nil {
+			// Skip this process if OpenProcess fails to return a valid handle. This
+			// handles the case where one of the pids in the job exited before we
+			// could open a handle to it.
+ return 0, nil
+ }
+ defer func() {
+ _ = windows.Close(h)
+ }()
+ // Check if the process is actually running in the job still. There's a small chance
+ // that the process could have exited and had its pid re-used between grabbing the pids
+ // in the job and opening the handle to it above.
+ var inJob int32
+ if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
+			// This shouldn't fail unless we have incorrect access rights, which we
+			// control here, so it is best to error out if this fails.
+ return 0, err
+ }
+ // Don't report stats for this process as it's not running in the job. This shouldn't be
+ // an error condition though.
+ if inJob == 0 {
+ return 0, nil
+ }
+
+ var vmCounters winapi.VM_COUNTERS_EX2
+ status := winapi.NtQueryInformationProcess(
+ h,
+ winapi.ProcessVmCounters,
+ unsafe.Pointer(&vmCounters),
+ uint32(unsafe.Sizeof(vmCounters)),
+ nil,
+ )
+ if !winapi.NTSuccess(status) {
+ return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
+ }
+ return uint64(vmCounters.PrivateWorkingSetSize), nil
+ }
+
+ var jobWorkingSetSize uint64
+ for _, pid := range pids {
+ workingSet, err := openAndQueryWorkingSet(pid)
+ if err != nil {
+ return 0, err
+ }
+ jobWorkingSetSize += workingSet
+ }
+
+ return jobWorkingSetSize, nil
+}
+
+// SetIOTracking enables IO tracking for processes in the job object.
+// This enables use of the QueryStorageStats method.
+func (job *JobObject) SetIOTracking() error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ return enableIOTracking(job.handle)
+}
+
+func enableIOTracking(job windows.Handle) error {
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+ }
+ if _, err := windows.SetInformationJobObject(
+ job,
+ winapi.JobObjectIoAttribution,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ ); err != nil {
+ return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
new file mode 100644
index 000000000..e3b1a1edc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
@@ -0,0 +1,324 @@
+//go:build windows
+
+package jobobject
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ memoryLimitMax uint64 = 0xffffffffffffffff
+)
+
+func isFlagSet(flag, controlFlags uint32) bool {
+ return (flag & controlFlags) == flag
+}
+
+// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
+func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
+ // Go through and check what limits were specified and apply them to the job.
+ if limits.MemoryLimitInBytes != 0 {
+ if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
+ return fmt.Errorf("failed to set job object memory limit: %w", err)
+ }
+ }
+
+ if limits.CPULimit != 0 {
+ if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ } else if limits.CPUWeight != 0 {
+ if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ }
+
+ if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
+ if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
+ return fmt.Errorf("failed to set io limit on job object: %w", err)
+ }
+ }
+ return nil
+}
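A compact sketch of applying limits through this API; the CPU rate unit (1/10000ths of total cycles, so 5000 is roughly a 50% hard cap) follows the Win32 JOBOBJECT_CPU_RATE_CONTROL_INFORMATION convention and is an assumption here, not something this diff states:

```go
//go:build windows

package example

import "github.com/Microsoft/hcsshim/internal/jobobject"

// applyLimits caps memory, CPU, and IOPS on an existing job in one call.
func applyLimits(job *jobobject.JobObject) error {
	return job.SetResourceLimits(&jobobject.JobLimits{
		MemoryLimitInBytes: 512 * 1024 * 1024, // 512 MiB job-wide cap
		CPULimit:           5000,              // hard cap, assumed units of 1/10000 of total cycles
		MaxIOPS:            1000,
	})
}
```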
+
+// SetTerminateOnLastHandleClose sets the job object flag specifying that the job
+// should terminate all of its processes when the last open handle to the job is closed.
+func (job *JobObject) SetTerminateOnLastHandleClose() error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ return job.setExtendedInformation(info)
+}
+
+// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
+func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
+ if memoryLimitInBytes >= memoryLimitMax {
+ return errors.New("memory limit specified exceeds the max size")
+ }
+
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+
+ info.JobMemoryLimit = uintptr(memoryLimitInBytes)
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
+ return job.setExtendedInformation(info)
+}
+
+// GetMemoryLimit gets the memory limit in bytes of the job object.
+func (job *JobObject) GetMemoryLimit() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.JobMemoryLimit), nil
+}
+
+// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
+// `rateControlValue` for the job object.
+func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
+ cpuInfo, err := job.getCPURateControlInformation()
+ if err != nil {
+ return err
+ }
+ switch rateControlType {
+ case WeightBased:
+ if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
+ return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
+ cpuInfo.Value = rateControlValue
+ case RateBased:
+ if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
+ return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
+ cpuInfo.Value = rateControlValue
+ default:
+ return errors.New("invalid job object cpu rate control type")
+ }
+ return job.setCPURateControlInfo(cpuInfo)
+}
+
+// GetCPULimit gets the cpu limits for the job object.
+// `rateControlType` is used to indicate what type of cpu limit to query for.
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
+ info, err := job.getCPURateControlInformation()
+ if err != nil {
+ return 0, err
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
+ return 0, errors.New("the job does not have cpu rate control enabled")
+ }
+
+ switch rateControlType {
+ case WeightBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
+ }
+ case RateBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
+ }
+ default:
+ return 0, errors.New("invalid job object cpu rate control type")
+ }
+ return info.Value, nil
+}
+
+// SetCPUAffinity sets the processor affinity for the job object.
+// The affinity is passed in as a bitmask.
+func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
+
+ // We really, really shouldn't be running on 32 bit, but just in case (and to satisfy CodeQL) ...
+ const maxUintptr = ^uintptr(0)
+ if affinityBitMask > uint64(maxUintptr) {
+ return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr)
+ }
+
+ info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
+ return job.setExtendedInformation(info)
+}
+
+// GetCPUAffinity gets the processor affinity for the job object.
+// The returned affinity is a bitmask.
+func (job *JobObject) GetCPUAffinity() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.BasicLimitInformation.Affinity), nil
+}
+
+// SetIOLimit sets the IO limits specified on the job object.
+func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
+ ioInfo, err := job.getIOLimit()
+ if err != nil {
+ return err
+ }
+ ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
+ if maxBandwidth != 0 {
+ ioInfo.MaxBandwidth = maxBandwidth
+ }
+ if maxIOPS != 0 {
+ ioInfo.MaxIops = maxIOPS
+ }
+ return job.setIORateControlInfo(ioInfo)
+}
+
+// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
+func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxBandwidth, nil
+}
+
+// GetIOMaxIopsLimit gets the max iops for the job object.
+func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxIops, nil
+}
+
+// Helper function for getting a job object's extended information.
+func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for getting a job object's CPU rate control information.
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for setting a job object's extended information.
+func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ uintptr(unsafe.Pointer(info)),
+ uint32(unsafe.Sizeof(*info)),
+ ); err != nil {
+ return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
+ }
+ return nil
+}
+
+// Helper function for querying job handle for IO limit information.
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
+ var blockCount uint32 = 1
+
+ if _, err := winapi.QueryIoRateControlInformationJobObject(
+ job.handle,
+ nil,
+ &ioInfo,
+ &blockCount,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
+ return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
+ }
+ return ioInfo, nil
+}
+
+// Helper function for setting a job object's IO rate control information.
+func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
+ return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
+ }
+ return nil
+}
+
+// Helper function for setting a job object's CPU rate control information.
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ uintptr(unsafe.Pointer(cpuInfo)),
+		uint32(unsafe.Sizeof(*cpuInfo)), // size of the struct, not the pointer
+ ); err != nil {
+ return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go
new file mode 100644
index 000000000..d17d909d9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go
@@ -0,0 +1,118 @@
+package log
+
+import (
+ "context"
+
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+)
+
+type entryContextKeyType int
+
+const _entryContextKey entryContextKeyType = iota
+
+var (
+ // L is the default, blank logging entry. WithField and co. all return a copy
+ // of the original entry, so this will not leak fields between calls.
+ //
+ // Do NOT modify fields directly, as that will corrupt state for all users and
+ // is not thread safe.
+ // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`.
+ L = logrus.NewEntry(logrus.StandardLogger())
+
+ // G is an alias for GetEntry
+ G = GetEntry
+
+ // S is an alias for SetEntry
+ S = SetEntry
+
+ // U is an alias for UpdateContext
+ U = UpdateContext
+)
+
+// GetEntry returns a `logrus.Entry` stored in the context, if one exists.
+// Otherwise, it returns a default entry that points to the current context.
+//
+// Note: if a new entry is returned, it will reference the passed-in context.
+// However, existing entries may be stored in parent contexts and additionally
+// reference earlier contexts.
+// Use `UpdateContext` to update the entry and context.
+func GetEntry(ctx context.Context) *logrus.Entry {
+ entry := fromContext(ctx)
+
+ if entry == nil {
+ entry = L.WithContext(ctx)
+ }
+
+ return entry
+}
+
+// SetEntry updates the log entry in the context with the provided fields, and
+// returns both. It is equivalent to:
+//
+// entry := GetEntry(ctx).WithFields(fields)
+// ctx = WithContext(ctx, entry)
+//
+// See WithContext for more information.
+func SetEntry(ctx context.Context, fields logrus.Fields) (context.Context, *logrus.Entry) {
+ e := GetEntry(ctx)
+ if len(fields) > 0 {
+ e = e.WithFields(fields)
+ }
+ return WithContext(ctx, e)
+}
+
+// UpdateContext extracts the log entry from the context and, if the entry's
+// context points to a parent of the current context, adds the entry
+// to the most recent context. It is equivalent to:
+//
+// entry := GetEntry(ctx)
+// ctx = WithContext(ctx, entry)
+//
+// This allows the entry to reference the most recent context and any new
+// values (such as span contexts) added to it.
+//
+// See WithContext for more information.
+func UpdateContext(ctx context.Context) context.Context {
+	// there is no way to check whether it is ctx itself (and not one of its parents)
+	// that contains `e`, so, at a slight cost, force-add `e` to the context
+ ctx, _ = WithContext(ctx, GetEntry(ctx))
+ return ctx
+}
+
+// WithContext returns a context that contains the provided log entry.
+// The entry can be extracted with `GetEntry` (`G`).
+//
+// The entry in the context is a copy of `entry` (generated by `entry.WithContext`).
+func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) {
+ // regardless of the order, entry.Context != GetEntry(ctx)
+ // here, the returned entry will reference the supplied context
+ entry = entry.WithContext(ctx)
+ ctx = context.WithValue(ctx, _entryContextKey, entry)
+
+ return ctx, entry
+}
+
+// Copy extracts the tracing Span and logging entry from the src Context, if they
+// exist, and adds them to the dst Context.
+//
+// This is useful to share tracing and logging between contexts, but not the
+// cancellation: for example, when the src Context has been cancelled but cleanup
+// operations triggered by the cancellation require a non-cancelled context to
+// execute.
+func Copy(dst context.Context, src context.Context) context.Context {
+ if s := trace.FromContext(src); s != nil {
+ dst = trace.NewContext(dst, s)
+ }
+
+ if e := fromContext(src); e != nil {
+ dst, _ = WithContext(dst, e)
+ }
+
+ return dst
+}
+
+func fromContext(ctx context.Context) *logrus.Entry {
+ e, _ := ctx.Value(_entryContextKey).(*logrus.Entry)
+ return e
+}
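A small sketch of how these helpers compose; the field name is illustrative:

```go
package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/sirupsen/logrus"
)

func doWork(ctx context.Context) {
	// G recovers the enriched entry anywhere downstream of S.
	log.G(ctx).Info("starting work") // carries cid=demo-container
}

func main() {
	ctx, _ := log.S(context.Background(), logrus.Fields{"cid": "demo-container"})
	doWork(ctx)
}
```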
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
new file mode 100644
index 000000000..1ceb26bad
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
@@ -0,0 +1,115 @@
+package log
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// TimeFormat is [time.RFC3339Nano] with nanoseconds padded using
+// zeros to ensure the formatted time is always the same number of
+// characters.
+// Based on RFC3339NanoFixed from github.com/containerd/log
+const TimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
+
+func FormatTime(t time.Time) string {
+ return t.Format(TimeFormat)
+}
+
+// DurationFormat formats a [time.Duration] log entry.
+//
+// A nil value signals an error with the formatting.
+type DurationFormat func(time.Duration) interface{}
+
+func DurationFormatString(d time.Duration) interface{} { return d.String() }
+func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
+func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
+
+// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
+//
+// See FormatEnabled for more information.
+func FormatIO(ctx context.Context, v interface{}) string {
+ m := make(map[string]string)
+ m["type"] = reflect.TypeOf(v).String()
+
+ switch t := v.(type) {
+ case net.Conn:
+ m["localAddress"] = formatAddr(t.LocalAddr())
+ m["remoteAddress"] = formatAddr(t.RemoteAddr())
+ case interface{ Addr() net.Addr }:
+ m["address"] = formatAddr(t.Addr())
+ default:
+ return Format(ctx, t)
+ }
+
+ return Format(ctx, m)
+}
+
+func formatAddr(a net.Addr) string {
+ return a.Network() + "://" + a.String()
+}
+
+// Format formats an object into a JSON string, without any indentation or
+// HTML escapes.
+// Context is used to output a log warning if the conversion fails.
+//
+// This is intended primarily for `trace.StringAttribute()`
+func Format(ctx context.Context, v interface{}) string {
+ b, err := encode(v)
+ if err != nil {
+ // logging errors aren't really warning worthy, and can potentially spam a lot of logs out
+ G(ctx).WithFields(logrus.Fields{
+ logrus.ErrorKey: err,
+ "type": fmt.Sprintf("%T", v),
+ }).Debug("could not format value")
+ return ""
+ }
+
+ return string(b)
+}
+
+func encode(v interface{}) (_ []byte, err error) {
+ if m, ok := v.(proto.Message); ok {
+ // use canonical JSON encoding for protobufs (instead of [encoding/json])
+ // https://protobuf.dev/programming-guides/proto3/#json
+ var b []byte
+ b, err = protojson.MarshalOptions{
+ AllowPartial: true,
+ // protobuf defaults to camel case for JSON encoding; use proto field name instead (snake case)
+ UseProtoNames: true,
+ }.Marshal(m)
+ if err == nil {
+			// the protojson marshaller tries to unmarshal anypb.Any fields, which can
+			// fail for types encoded with "github.com/containerd/typeurl/v2".
+			// we could try creating a dedicated protoregistry.MessageTypeResolver that
+			// uses typeurl, but it's more robust to fall back on json marshalling for errors in general
+ return b, nil
+ }
+
+ }
+
+ buf := &bytes.Buffer{}
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", "")
+
+ if jErr := enc.Encode(v); jErr != nil {
+ if err != nil {
+ // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
+ //nolint:errorlint // non-wrapping format verb for fmt.Errorf
+ return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
+ }
+ return nil, fmt.Errorf("json encoding: %w", jErr)
+ }
+
+ // encoder.Encode appends a newline to the end
+ return bytes.TrimSpace(buf.Bytes()), nil
+}
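As the doc comment notes, `Format` is aimed primarily at span attributes; a hedged sketch (the attribute key and config value are illustrative):

```go
package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/log"
	"go.opencensus.io/trace"
)

// annotate attaches a compact-JSON rendering of cfg to the span.
// Format returns "" (and logs at debug level) if encoding fails.
func annotate(ctx context.Context, span *trace.Span, cfg interface{}) {
	span.AddAttributes(trace.StringAttribute("config", log.Format(ctx, cfg)))
}
```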
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go
deleted file mode 100644
index ba6b1a4a5..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package log
-
-import (
- "context"
-
- "github.com/sirupsen/logrus"
- "go.opencensus.io/trace"
-)
-
-// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx`
-// contains an OpenCensus `trace.Span`.
-func G(ctx context.Context) *logrus.Entry {
- span := trace.FromContext(ctx)
- if span != nil {
- sctx := span.SpanContext()
- return logrus.WithFields(logrus.Fields{
- "traceID": sctx.TraceID.String(),
- "spanID": sctx.SpanID.String(),
- // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this?
- })
- }
- return logrus.NewEntry(logrus.StandardLogger())
-}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
new file mode 100644
index 000000000..bb547a329
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
@@ -0,0 +1,173 @@
+package log
+
+import (
+ "bytes"
+ "reflect"
+ "time"
+
+ "github.com/Microsoft/hcsshim/internal/logfields"
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+)
+
+const nullString = "null"
+
+// Hook intercepts and formats a [logrus.Entry] before it is logged.
+//
+// The shim either outputs the logs through an ETW hook (discarding the formatted
+// output), or writes them to a pipe for logging binaries to consume.
+// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
+// by the shim.
+type Hook struct {
+ // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
+ // Variables of [bytes.Buffer] will be converted to []byte.
+ //
+ // Default is false.
+ EncodeAsJSON bool
+
+	// TimeFormat specifies the format for [time.Time] variables.
+	// An empty string disables formatting.
+	// When disabled, the fallback is JSON encoding, if enabled.
+ //
+ // Default is [TimeFormat].
+ TimeFormat string
+
+	// DurationFormat converts [time.Duration] fields to an appropriate encoding.
+	// nil disables formatting.
+	// When disabled, the fallback is JSON encoding, if enabled.
+ //
+ // Default is [DurationFormatString], which appends a duration unit after the value.
+ DurationFormat DurationFormat
+
+ // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
+ // the entry from the span context stored in [logrus.Entry.Context], if it exists.
+ AddSpanContext bool
+}
+
+var _ logrus.Hook = &Hook{}
+
+func NewHook() *Hook {
+ return &Hook{
+ TimeFormat: TimeFormat,
+ DurationFormat: DurationFormatString,
+ AddSpanContext: true,
+ }
+}
+
+func (h *Hook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
+
+func (h *Hook) Fire(e *logrus.Entry) (err error) {
+ // JSON encode, if necessary, then add span information
+ h.encode(e)
+ h.addSpanContext(e)
+
+ return nil
+}
+
+// encode loops through all the fields in the [logrus.Entry] and encodes them according to
+// the settings in [Hook].
+// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
+// fields of type [time.Time].
+//
+// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
+// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
+// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
+//
+// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
+// then this is a no-op.
+func (h *Hook) encode(e *logrus.Entry) {
+ d := e.Data
+
+ formatTime := h.TimeFormat != ""
+ formatDuration := h.DurationFormat != nil
+ if !(h.EncodeAsJSON || formatTime || formatDuration) {
+ return
+ }
+
+ for k, v := range d {
+ // encode types with dedicated formatting options first
+
+ if vv, ok := v.(time.Time); formatTime && ok {
+ d[k] = vv.Format(h.TimeFormat)
+ continue
+ }
+
+ if vv, ok := v.(time.Duration); formatDuration && ok {
+ d[k] = h.DurationFormat(vv)
+ continue
+ }
+
+ // general case JSON encoding
+
+ if !h.EncodeAsJSON {
+ continue
+ }
+
+ switch vv := v.(type) {
+ // built in types
+ // "json" marshals errors as "{}", so leave alone here
+ case bool, string, error, uintptr,
+ int8, int16, int32, int64, int,
+ uint8, uint32, uint64, uint,
+ float32, float64:
+ continue
+
+ // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it
+ // may be a binary payload and not representable as a string.
+ // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`,
+ // so cannot use `vv.Bytes`.
+		// Could move this to below the `reflect.Indirect()` call below, but
+		// that would require additional type matching and dereferencing.
+ // Easier to keep these duplicate branches here.
+ case bytes.Buffer:
+ v = vv.Bytes()
+ case *bytes.Buffer:
+ v = vv.Bytes()
+ }
+
+ // dereference pointer or interface variables
+ rv := reflect.Indirect(reflect.ValueOf(v))
+ // check if `v` is a null pointer
+ if !rv.IsValid() {
+ d[k] = nullString
+ continue
+ }
+
+ switch rv.Kind() {
+ case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
+ default:
+ // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
+ // Chan, Func: not supported by json
+ // Interface, Pointer: dereferenced above
+ // UnsafePointer: not supported by json, not safe to de-reference; leave alone
+ continue
+ }
+
+ b, err := encode(v)
+ if err != nil {
+			// Returning an error here would be written to stderr (i.e., to `panic.log`)
+			// and would stop the remaining hooks (i.e., exporting to ETW) from firing.
+			// So add encoding errors to the entry data to be written out, but keep on
+			// processing.
+			d[k+"-"+logrus.ErrorKey] = err.Error()
+			// keep the original `v` as the value
+ continue
+ }
+ d[k] = string(b)
+ }
+}
+
+func (h *Hook) addSpanContext(e *logrus.Entry) {
+ ctx := e.Context
+ if !h.AddSpanContext || ctx == nil {
+ return
+ }
+ span := trace.FromContext(ctx)
+ if span == nil {
+ return
+ }
+ sctx := span.SpanContext()
+ e.Data[logfields.TraceID] = sctx.TraceID.String()
+ e.Data[logfields.SpanID] = sctx.SpanID.String()
+}
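Wiring the hook into a logger — a minimal sketch using the defaults from `NewHook`:

```go
package main

import (
	"time"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/sirupsen/logrus"
)

func main() {
	logrus.AddHook(log.NewHook())
	// With DurationFormatString (the default), the duration field is
	// rendered as "1.5s" rather than the raw nanosecond count.
	logrus.WithField("elapsed", 1500*time.Millisecond).Info("done")
}
```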
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go b/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go
new file mode 100644
index 000000000..909ba68b2
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go
@@ -0,0 +1,12 @@
+package log
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+type NopFormatter struct{}
+
+var _ logrus.Formatter = NopFormatter{}
+
+// Format does nothing and returns a nil slice.
+func (NopFormatter) Format(*logrus.Entry) ([]byte, error) { return nil, nil }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
new file mode 100644
index 000000000..5a960e0d3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
@@ -0,0 +1,184 @@
+package log
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "sync/atomic"
+
+ hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+)
+
+// This file scrubs objects of potentially sensitive information before they are passed to logging.
+
+type genMap = map[string]interface{}
+type scrubberFunc func(genMap) error
+
+const _scrubbedReplacement = ""
+
+var (
+ ErrUnknownType = errors.New("encoded object is of unknown type")
+
+	// case sensitive keywords, so "env" is not a substring of "Environment"
+ _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")}
+
+ _scrub int32
+)
+
+// SetScrubbing enables scrubbing
+func SetScrubbing(enable bool) {
+	v := int32(0) // can't convert from bool to int32 directly
+ if enable {
+ v = 1
+ }
+ atomic.StoreInt32(&_scrub, v)
+}
+
+// IsScrubbingEnabled checks if scrubbing is enabled
+func IsScrubbingEnabled() bool {
+ v := atomic.LoadInt32(&_scrub)
+ return v != 0
+}
+
+// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of
+// type internal/hcs/schema2.ProcessParameters (aka hcsschema.ProcessParameters)
+func ScrubProcessParameters(s string) (string, error) {
+ // todo: deal with v1 ProcessConfig
+ b := []byte(s)
+ if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) {
+ return s, nil
+ }
+
+ pp := hcsschema.ProcessParameters{}
+ if err := json.Unmarshal(b, &pp); err != nil {
+ return "", err
+ }
+ pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
+
+ b, err := encode(pp)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+// ScrubBridgeCreate scrubs requests sent over the bridge of type
+// internal/gcs/protocol.containerCreate wrapping an internal/hcsoci.linuxHostedSystem
+func ScrubBridgeCreate(b []byte) ([]byte, error) {
+ return scrubBytes(b, scrubBridgeCreate)
+}
+
+func scrubBridgeCreate(m genMap) error {
+ if !isRequestBase(m) {
+ return ErrUnknownType
+ }
+ if ss, ok := m["ContainerConfig"]; ok {
+ // ContainerConfig is a json encoded struct passed as a regular string field
+ s, ok := ss.(string)
+ if !ok {
+ return ErrUnknownType
+ }
+ b, err := scrubBytes([]byte(s), scrubLinuxHostedSystem)
+ if err != nil {
+ return err
+ }
+ m["ContainerConfig"] = string(b)
+ return nil
+ }
+ return ErrUnknownType
+}
+
+func scrubLinuxHostedSystem(m genMap) error {
+ if m, ok := index(m, "OciSpecification"); ok { //nolint:govet // shadow
+ if _, ok := m["annotations"]; ok {
+ m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
+ }
+ if m, ok := index(m, "process"); ok { //nolint:govet // shadow
+ if _, ok := m["env"]; ok {
+ m["env"] = []string{_scrubbedReplacement}
+ return nil
+ }
+ }
+ }
+ return ErrUnknownType
+}
+
+// ScrubBridgeExecProcess scrubs requests sent over the bridge of type
+// internal/gcs/protocol.containerExecuteProcess
+func ScrubBridgeExecProcess(b []byte) ([]byte, error) {
+ return scrubBytes(b, scrubExecuteProcess)
+}
+
+func scrubExecuteProcess(m genMap) error {
+ if !isRequestBase(m) {
+ return ErrUnknownType
+ }
+ if m, ok := index(m, "Settings"); ok { //nolint:govet // shadow
+ if ss, ok := m["ProcessParameters"]; ok {
+			// ProcessParameters is a json encoded struct passed as a regular string field
+ s, ok := ss.(string)
+ if !ok {
+ return ErrUnknownType
+ }
+
+ s, err := ScrubProcessParameters(s)
+ if err != nil {
+ return err
+ }
+
+ m["ProcessParameters"] = s
+ return nil
+ }
+ }
+ return ErrUnknownType
+}
+
+func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
+ if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) {
+ return b, nil
+ }
+
+ m := make(genMap)
+ if err := json.Unmarshal(b, &m); err != nil {
+ return nil, err
+ }
+
+	// could use regexp, but if the env strings contain braces, the regexp fails;
+	// parsing into individual structs would require access to private structs
+ if err := scrub(m); err != nil {
+ return nil, err
+ }
+
+ b, err := encode(m)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func isRequestBase(m genMap) bool {
+ // neither of these are (currently) `omitempty`
+ _, a := m["ActivityId"]
+ _, c := m["ContainerId"]
+ return a && c
+}
+
+// index is a combination of `m, ok := m[s]` and `mm, ok := m.(genMap)`
+func index(m genMap, s string) (genMap, bool) {
+ if m, ok := m[s]; ok {
+ mm, ok := m.(genMap)
+ return mm, ok
+ }
+
+ return m, false
+}
+
+func hasKeywords(b []byte) bool {
+ for _, bb := range _scrubKeywords {
+ if bytes.Contains(b, bb) {
+ return true
+ }
+ }
+ return false
+}
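A usage sketch of the scrubbing switch; the JSON payload is illustrative, shaped like an hcsschema.ProcessParameters document:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/log"
)

func main() {
	log.SetScrubbing(true)

	// The Environment map is replaced with the scrubbed placeholder
	// before the string is handed to logging.
	raw := `{"CommandLine":"app.exe","Environment":{"SECRET":"hunter2"}}`
	scrubbed, err := log.ScrubProcessParameters(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(scrubbed)
}
```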
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
index cf2c166d9..cceb3e2d1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
@@ -3,30 +3,60 @@ package logfields
const (
// Identifiers
+ Name = "name"
+ Namespace = "namespace"
+ Operation = "operation"
+
+ ID = "id"
+ SandboxID = "sid"
ContainerID = "cid"
- UVMID = "uvm-id"
+ ExecID = "eid"
ProcessID = "pid"
+ TaskID = "tid"
+ UVMID = "uvm-id"
+
+ // networking and IO
+
+ File = "file"
+ Path = "path"
+ Bytes = "bytes"
+ Pipe = "pipe"
// Common Misc
- // Timeout represents an operation timeout.
- Timeout = "timeout"
+ Attempt = "attemptNo"
JSON = "json"
+ // Time
+
+ StartTime = "startTime"
+ EndTime = "endTime"
+ Duration = "duration"
+ Timeout = "timeout"
+
// Keys/values
Field = "field"
+ Key = "key"
OCIAnnotation = "oci-annotation"
Value = "value"
+ Options = "options"
// Golang type's
ExpectedType = "expected-type"
Bool = "bool"
+ Int32 = "int32"
Uint32 = "uint32"
Uint64 = "uint64"
// runhcs
VMShimOperation = "vmshim-op"
+
+ // logging and tracing
+
+ TraceID = "traceID"
+ SpanID = "spanID"
+ ParentSpanID = "parentSpanID"
)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go
new file mode 100644
index 000000000..6d39ca3bf
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go
@@ -0,0 +1,316 @@
+package memory
+
+import (
+ "github.com/pkg/errors"
+)
+
+const (
+ minimumClassSize = MiB
+ maximumClassSize = 4 * GiB
+ memoryClassNumber = 7
+)
+
+var (
+ ErrInvalidMemoryClass = errors.New("invalid memory class")
+ ErrEarlyMerge = errors.New("not all children have been freed")
+ ErrEmptyPoolOperation = errors.New("operation on empty pool")
+)
+
+// GetMemoryClassType returns the minimum memory class type that can hold a device of
+// a given size. The smallest class is 1MB and the largest is 4GB, with class sizes
+// growing by a factor of 4 (2 bits) in between, for a total of 7 different classes.
+// This function does not do a validity check.
+func GetMemoryClassType(s uint64) classType {
+ s = (s - 1) >> 20
+ memCls := uint32(0)
+ for s > 0 {
+ s = s >> 2
+ memCls++
+ }
+ return classType(memCls)
+}
+
+// GetMemoryClassSize returns size in bytes for a given memory class
+func GetMemoryClassSize(memCls classType) (uint64, error) {
+ if memCls >= memoryClassNumber {
+ return 0, ErrInvalidMemoryClass
+ }
+ return minimumClassSize << (2 * memCls), nil
+}
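A standalone sketch of the class math above (constants restated locally, since the package is internal); it prints the bucket each size lands in:

```go
package main

import "fmt"

const mib = uint64(1) << 20

// classOf mirrors GetMemoryClassType: subtract one, shift out the MiB,
// then count base-4 digits.
func classOf(s uint64) uint32 {
	s = (s - 1) >> 20
	c := uint32(0)
	for s > 0 {
		s >>= 2
		c++
	}
	return c
}

func main() {
	for _, sz := range []uint64{mib, 3 * mib, 64 * mib, 4 * 1024 * mib} {
		c := classOf(sz)
		// 1 MiB -> class 0, 3 MiB -> class 1 (4 MiB), 64 MiB -> class 3,
		// 4096 MiB -> class 6 (4 GiB)
		fmt.Printf("%6d MiB -> class %d (%d MiB regions)\n",
			sz>>20, c, (mib<<(2*c))>>20)
	}
}
```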
+
+// region represents a contiguous memory block
+type region struct {
+ // parent region that has been split into 4
+ parent *region
+ class classType
+ // offset represents offset in bytes
+ offset uint64
+}
+
+// memoryPool tracks free and busy (used) memory regions
+type memoryPool struct {
+ free map[uint64]*region
+ busy map[uint64]*region
+}
+
+// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c
+// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory
+// space.
+//
+// There are a total of 7 different region sizes that can be allocated, with the smallest
+// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device).
+//
+// For efficiency and to reduce fragmentation an entire region is allocated when requested.
+// When there's no available region of requested size, we try to allocate more memory for
+// this particular size by splitting the next available larger region into smaller ones, e.g.
+// if there's no region available for size class 0, we try splitting a region from class 1,
+// then class 2 etc, until we are able to do so or hit the upper limit.
+type PoolAllocator struct {
+ pools [memoryClassNumber]*memoryPool
+}
+
+var _ MappedRegion = &region{}
+var _ Allocator = &PoolAllocator{}
+
+func (r *region) Offset() uint64 {
+ return r.offset
+}
+
+func (r *region) Size() uint64 {
+ sz, err := GetMemoryClassSize(r.class)
+ if err != nil {
+ panic(err)
+ }
+ return sz
+}
+
+func (r *region) Type() classType {
+ return r.class
+}
+
+func newEmptyMemoryPool() *memoryPool {
+ return &memoryPool{
+ free: make(map[uint64]*region),
+ busy: make(map[uint64]*region),
+ }
+}
+
+func NewPoolMemoryAllocator() PoolAllocator {
+ pa := PoolAllocator{}
+ p := newEmptyMemoryPool()
+ // by default we allocate a single region with maximum possible size (class type)
+ p.free[0] = &region{
+ class: memoryClassNumber - 1,
+ offset: 0,
+ }
+ pa.pools[memoryClassNumber-1] = p
+ return pa
+}
+
+// Allocate checks the memory region pool for the given `size` and returns a free region with
+// the minimal offset; if none is available, it tries expanding the matching memory pool.
+//
+// Internally this is done by moving a region from the free pool into the busy pool.
+func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) {
+ memCls := GetMemoryClassType(size)
+ if memCls >= memoryClassNumber {
+ return nil, ErrInvalidMemoryClass
+ }
+
+ // find region with the smallest offset
+ nextCls, nextOffset, err := pa.findNextOffset(memCls)
+ if err != nil {
+ return nil, err
+ }
+
+ // this means that there are no more regions for the current class, try expanding
+ if nextCls != memCls {
+ if err := pa.split(memCls); err != nil {
+ if errors.Is(err, ErrInvalidMemoryClass) {
+ return nil, ErrNotEnoughSpace
+ }
+ return nil, err
+ }
+ }
+
+ if err := pa.markBusy(memCls, nextOffset); err != nil {
+ return nil, err
+ }
+
+ // by this point the memory pool for memCls should have been created,
+ // either before or during the split call
+ if r := pa.pools[memCls].busy[nextOffset]; r != nil {
+ return r, nil
+ }
+
+ return nil, ErrNotEnoughSpace
+}
+
+// Release marks the memory region `reg` as free and tries to merge smaller regions into
+// a bigger one.
+func (pa *PoolAllocator) Release(reg MappedRegion) error {
+ mp := pa.pools[reg.Type()]
+ if mp == nil {
+ return ErrEmptyPoolOperation
+ }
+
+ err := pa.markFree(reg.Type(), reg.Offset())
+ if err != nil {
+ return err
+ }
+
+ n := mp.free[reg.Offset()]
+ if n == nil {
+ return ErrNotAllocated
+ }
+ if err := pa.merge(n.parent); err != nil {
+ if !errors.Is(err, ErrEarlyMerge) {
+ return err
+ }
+ }
+ return nil
+}
+
+// findNextOffset finds the offset of the next free region for a given memCls
+func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) {
+ for mc := memCls; mc < memoryClassNumber; mc++ {
+ pi := pa.pools[mc]
+ if pi == nil || len(pi.free) == 0 {
+ continue
+ }
+
+ target := uint64(maximumClassSize)
+ for offset := range pi.free {
+ if offset < target {
+ target = offset
+ }
+ }
+ return mc, target, nil
+ }
+ return 0, 0, ErrNotEnoughSpace
+}
+
+// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit
+func (pa *PoolAllocator) split(clsType classType) error {
+ nextClsType := clsType + 1
+ if nextClsType >= memoryClassNumber {
+ return ErrInvalidMemoryClass
+ }
+
+ nextPool := pa.pools[nextClsType]
+ if nextPool == nil {
+ nextPool = newEmptyMemoryPool()
+ pa.pools[nextClsType] = nextPool
+ }
+
+ cls, offset, err := pa.findNextOffset(nextClsType)
+ if err != nil {
+ return err
+ }
+ // not enough memory in the next class, try to recursively expand
+ if cls != nextClsType {
+ if err := pa.split(nextClsType); err != nil {
+ return err
+ }
+ }
+
+ if err := pa.markBusy(nextClsType, offset); err != nil {
+ return err
+ }
+
+ // memCls validity has been checked already, we can ignore the error
+ clsSize, _ := GetMemoryClassSize(clsType)
+
+ nextReg := nextPool.busy[offset]
+ if nextReg == nil {
+ return ErrNotAllocated
+ }
+
+ // expand memCls
+ cp := pa.pools[clsType]
+ if cp == nil {
+ cp = newEmptyMemoryPool()
+ pa.pools[clsType] = cp
+ }
+ // create 4 smaller regions
+ for i := uint64(0); i < 4; i++ {
+ offset := nextReg.offset + i*clsSize
+ reg := &region{
+ parent: nextReg,
+ class: clsType,
+ offset: offset,
+ }
+ cp.free[offset] = reg
+ }
+ return nil
+}
+
+func (pa *PoolAllocator) merge(parent *region) error {
+ // nothing to merge
+ if parent == nil {
+ return nil
+ }
+
+ childCls := parent.class - 1
+ childPool := pa.pools[childCls]
+ // no child nodes to merge, try to merge parent
+ if childPool == nil {
+ return pa.merge(parent.parent)
+ }
+
+ childSize, err := GetMemoryClassSize(childCls)
+ if err != nil {
+ return err
+ }
+
+ // check if all the child nodes are free
+ var children []*region
+ for i := uint64(0); i < 4; i++ {
+ child, free := childPool.free[parent.offset+i*childSize]
+ if !free {
+ return ErrEarlyMerge
+ }
+ children = append(children, child)
+ }
+
+ // at this point all the child nodes are free and we can merge
+ for _, child := range children {
+ delete(childPool.free, child.offset)
+ }
+
+ if err := pa.markFree(parent.class, parent.offset); err != nil {
+ return err
+ }
+
+ return pa.merge(parent.parent)
+}
+
+// markFree internally moves a region with `offset` from busy to free map
+func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error {
+ clsPool := pa.pools[memCls]
+ if clsPool == nil {
+ return ErrEmptyPoolOperation
+ }
+
+ if reg, exists := clsPool.busy[offset]; exists {
+ clsPool.free[offset] = reg
+ delete(clsPool.busy, offset)
+ return nil
+ }
+ return ErrNotAllocated
+}
+
+// markBusy internally moves a region with `offset` from free to busy map
+func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error {
+ clsPool := pa.pools[memCls]
+ if clsPool == nil {
+ return ErrEmptyPoolOperation
+ }
+
+ if reg, exists := clsPool.free[offset]; exists {
+ clsPool.busy[offset] = reg
+ delete(clsPool.free, offset)
+ return nil
+ }
+ return ErrNotAllocated
+}
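
A short lifecycle sketch of the allocator above. Since the package lives under hcsshim's `internal/` tree, this only compiles inside that module, and the sizes are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/memory"
)

func main() {
	alloc := memory.NewPoolMemoryAllocator()

	// A 100 MiB request is rounded up to its class size (256 MiB, class 4),
	// splitting the initial 4 GiB region on the way down.
	reg, err := alloc.Allocate(100 * memory.MiB)
	if err != nil {
		panic(err)
	}
	fmt.Printf("offset=%d size=%d MiB\n", reg.Offset(), reg.Size()/memory.MiB)

	// Releasing frees the region; sibling regions that are all free are
	// merged back into their parent.
	if err := alloc.Release(reg); err != nil {
		panic(err)
	}
}
```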
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go
new file mode 100644
index 000000000..d6cdb8cc4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go
@@ -0,0 +1,28 @@
+package memory
+
+import "github.com/pkg/errors"
+
+type classType uint32
+
+const (
+ MiB = 1024 * 1024
+ GiB = 1024 * MiB
+)
+
+var (
+ ErrNotEnoughSpace = errors.New("not enough space")
+ ErrNotAllocated = errors.New("no memory allocated at the given offset")
+)
+
+// MappedRegion represents a memory block with an offset
+type MappedRegion interface {
+ Offset() uint64
+ Size() uint64
+ Type() classType
+}
+
+// Allocator is an interface for memory allocation
+type Allocator interface {
+ Allocate(uint64) (MappedRegion, error)
+ Release(MappedRegion) error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
new file mode 100644
index 000000000..bf8186401
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
@@ -0,0 +1,69 @@
+package oc
+
+import (
+ "errors"
+ "io"
+ "net"
+ "os"
+
+ errdefs "github.com/containerd/errdefs/pkg/errgrpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
+// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
+
+func toStatusCode(err error) codes.Code {
+ // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
+ // wraps an error defined in "github.com/containerd/errdefs", or is a
+ // context timeout or cancelled error
+ if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
+ return s.Code()
+ }
+
+ switch {
+ // case isAny(err):
+ // return codes.Cancelled
+ case isAny(err, os.ErrInvalid):
+ return codes.InvalidArgument
+ case isAny(err, os.ErrDeadlineExceeded):
+ return codes.DeadlineExceeded
+ case isAny(err, os.ErrNotExist):
+ return codes.NotFound
+ case isAny(err, os.ErrExist):
+ return codes.AlreadyExists
+ case isAny(err, os.ErrPermission):
+ return codes.PermissionDenied
+ // case isAny(err):
+ // return codes.ResourceExhausted
+ case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
+ return codes.FailedPrecondition
+ // case isAny(err):
+ // return codes.Aborted
+ // case isAny(err):
+ // return codes.OutOfRange
+ // case isAny(err):
+ // return codes.Unimplemented
+ case isAny(err, io.ErrNoProgress):
+ return codes.Internal
+ // case isAny(err):
+ // return codes.Unavailable
+ case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
+ return codes.DataLoss
+ // case isAny(err):
+ // return codes.Unauthenticated
+ default:
+ return codes.Unknown
+ }
+}
+
+// isAny returns true if errors.Is is true for any of the provided errors, errs.
+func isAny(err error, errs ...error) bool {
+ for _, e := range errs {
+ if errors.Is(err, e) {
+ return true
+ }
+ }
+ return false
+}
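
The `isAny` helper is just `errors.Is` fanned out over several sentinel errors, so wrapped errors are matched too. A standalone sketch of how the mapping behaves (the code-mapping comment reflects the switch above):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func isAny(err error, errs ...error) bool {
	for _, e := range errs {
		if errors.Is(err, e) {
			return true
		}
	}
	return false
}

func main() {
	// os.Open returns a *PathError wrapping fs.ErrNotExist,
	// so errors.Is sees through the wrapper.
	_, err := os.Open("/definitely/not/there")
	fmt.Println(isAny(err, os.ErrPermission, fs.ErrNotExist)) // true -> codes.NotFound
}
```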
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
index f428bdaf7..28f8f43a9 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
@@ -3,19 +3,26 @@ package oc
import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
+ "google.golang.org/grpc/codes"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
)
-var _ = (trace.Exporter)(&LogrusExporter{})
+const spanMessage = "Span"
+
+var _errorCodeKey = logrus.ErrorKey + "Code"
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
// `trace.SpanData` to logrus output.
-type LogrusExporter struct {
-}
+type LogrusExporter struct{}
+
+var _ trace.Exporter = &LogrusExporter{}
// ExportSpan exports `s` based on the following rules:
//
-// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
-// `s.ParentSpanID` for correlation
+// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
+// `s.SpanID`, and `s.ParentSpanID` for correlation
//
// 2. Any calls to .Annotate will not be supported.
//
@@ -23,21 +30,57 @@ type LogrusExporter struct {
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
// providing `s.Status.Message` as the error value.
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
- // Combine all span annotations with traceID, spanID, parentSpanID
- baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
- baseEntry.Data["traceID"] = s.TraceID.String()
- baseEntry.Data["spanID"] = s.SpanID.String()
- baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
- baseEntry.Data["startTime"] = s.StartTime
- baseEntry.Data["endTime"] = s.EndTime
- baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
- baseEntry.Data["name"] = s.Name
- baseEntry.Time = s.StartTime
+ if s.DroppedAttributeCount > 0 {
+ logrus.WithFields(logrus.Fields{
+ "name": s.Name,
+ logfields.TraceID: s.TraceID.String(),
+ logfields.SpanID: s.SpanID.String(),
+ "dropped": s.DroppedAttributeCount,
+ "maxAttributes": len(s.Attributes),
+ }).Warning("span had dropped attributes")
+ }
+
+ entry := log.L.Dup()
+ // Combine all span annotations with span data (e.g., trace ID, span ID, parent span ID,
+ // error, status code).
+ // (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we can
+ // skip the overhead in entry.WithFields() and add them directly to entry.Data.
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries.
+ data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
+
+ // Default log entry may have preexisting/application-wide data
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range s.Attributes {
+ data[k] = v
+ }
+
+ data[logfields.Name] = s.Name
+ data[logfields.TraceID] = s.TraceID.String()
+ data[logfields.SpanID] = s.SpanID.String()
+ data[logfields.ParentSpanID] = s.ParentSpanID.String()
+ data[logfields.StartTime] = s.StartTime
+ data[logfields.EndTime] = s.EndTime
+ data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
+ if sk := spanKindToString(s.SpanKind); sk != "" {
+ data["spanKind"] = sk
+ }
level := logrus.InfoLevel
if s.Status.Code != 0 {
level = logrus.ErrorLevel
- baseEntry.Data[logrus.ErrorKey] = s.Status.Message
+
+ // don't overwrite existing "error" or "errorCode" attributes
+ if _, ok := data[logrus.ErrorKey]; !ok {
+ data[logrus.ErrorKey] = s.Status.Message
+ }
+ if _, ok := data[_errorCodeKey]; !ok {
+ data[_errorCodeKey] = codes.Code(s.Status.Code).String()
+ }
}
- baseEntry.Log(level, "Span")
+
+ entry.Data = data
+ entry.Time = s.StartTime
+ entry.Log(level, spanMessage)
}
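
Hooking the exporter into OpenCensus is a one-time registration. A sketch of the wiring (the sampler choice is illustrative, and `oc` is an internal package, so this only compiles inside hcsshim):

```go
package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

func main() {
	// Every exported span is now written as a logrus entry: Info level
	// normally, Error level when the span's status code is non-zero.
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	_, span := trace.StartSpan(context.Background(), "example")
	span.End() // flows through LogrusExporter.ExportSpan
}
```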
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
index fee4765cb..726078432 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
@@ -1,17 +1,58 @@
package oc
import (
+ "context"
+
+ "github.com/Microsoft/hcsshim/internal/log"
"go.opencensus.io/trace"
)
+var DefaultSampler = trace.AlwaysSample()
+
// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
// `err` is `nil` assumes `trace.StatusCodeOk`.
func SetSpanStatus(span *trace.Span, err error) {
status := trace.Status{}
if err != nil {
- // TODO: JTERRY75 - Handle errors in a non-generic way
- status.Code = trace.StatusCodeUnknown
+ status.Code = int32(toStatusCode(err))
status.Message = err.Error()
}
span.SetStatus(status)
}
+
+// StartSpan wraps "go.opencensus.io/trace".StartSpan, but, if the span is sampling,
+// adds a log entry to the context that points to the newly created span.
+func StartSpan(ctx context.Context, name string, o ...trace.StartOption) (context.Context, *trace.Span) {
+ ctx, s := trace.StartSpan(ctx, name, o...)
+ return update(ctx, s)
+}
+
+// StartSpanWithRemoteParent wraps "go.opencensus.io/trace".StartSpanWithRemoteParent.
+//
+// See StartSpan for more information.
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent trace.SpanContext, o ...trace.StartOption) (context.Context, *trace.Span) {
+ ctx, s := trace.StartSpanWithRemoteParent(ctx, name, parent, o...)
+ return update(ctx, s)
+}
+
+func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
+ if s.IsRecordingEvents() {
+ ctx = log.UpdateContext(ctx)
+ }
+
+ return ctx, s
+}
+
+var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
+var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
+
+func spanKindToString(sk int) string {
+ switch sk {
+ case trace.SpanKindClient:
+ return "client"
+ case trace.SpanKindServer:
+ return "server"
+ default:
+ return ""
+ }
+}
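
The intended calling pattern pairs `StartSpan` with a deferred `SetSpanStatus`, exactly as the vmcompute wrappers below do. A minimal sketch (function name and body are illustrative):

```go
package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
)

func doWork(ctx context.Context) (err error) {
	ctx, span := oc.StartSpan(ctx, "doWork", oc.WithClientSpanKind)
	defer span.End()
	// SetSpanStatus reads the named return value, so it records whatever
	// error doWork ultimately returns, mapped to a gRPC-style status code.
	defer func() { oc.SetSpanStatus(span, err) }()

	_ = ctx // real work would thread ctx through here
	return nil
}
```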
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go
new file mode 100644
index 000000000..4f441803b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go
@@ -0,0 +1,76 @@
+package guestrequest
+
+// These are constants for v2 schema modify requests.
+
+type RequestType string
+type ResourceType string
+
+// RequestType const.
+const (
+ RequestTypeAdd RequestType = "Add"
+ RequestTypeRemove RequestType = "Remove"
+ RequestTypePreAdd RequestType = "PreAdd" // For networking
+ RequestTypeUpdate RequestType = "Update"
+)
+
+type SignalValueWCOW string
+
+const (
+ SignalValueWCOWCtrlC SignalValueWCOW = "CtrlC"
+ SignalValueWCOWCtrlBreak SignalValueWCOW = "CtrlBreak"
+ SignalValueWCOWCtrlClose SignalValueWCOW = "CtrlClose"
+ SignalValueWCOWCtrlLogOff SignalValueWCOW = "CtrlLogOff"
+ SignalValueWCOWCtrlShutdown SignalValueWCOW = "CtrlShutdown"
+)
+
+// ModificationRequest is for modify commands passed to the guest.
+type ModificationRequest struct {
+ RequestType RequestType `json:"RequestType,omitempty"`
+ ResourceType ResourceType `json:"ResourceType,omitempty"`
+ Settings interface{} `json:"Settings,omitempty"`
+}
+
+type NetworkModifyRequest struct {
+ AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck
+ RequestType RequestType `json:"RequestType,omitempty"`
+ Settings interface{} `json:"Settings,omitempty"`
+}
+
+type RS4NetworkModifyRequest struct {
+ AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck
+ RequestType RequestType `json:"RequestType,omitempty"`
+ Settings interface{} `json:"Settings,omitempty"`
+}
+
+var (
+ // V5 GUIDs for SCSI controllers
+ // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4"
+ // and index as names. For example, first GUID is created like this:
+ // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0"))
+ ScsiControllerGuids = []string{
+ "df6d0690-79e5-55b6-a5ec-c1e2f77f580a",
+ "0110f83b-de10-5172-a266-78bca56bf50a",
+ "b5d2d8d4-3a75-51bf-945b-3444dc6b8579",
+ "305891a9-b251-5dfe-91a2-c25d9212275b",
+ }
+)
+
+// constants for v2 schema ProcessModifyRequest
+
+// Operation type for [hcsschema.ProcessModifyRequest].
+type ProcessModifyOperation string
+
+const (
+ ModifyProcessConsoleSize ProcessModifyOperation = "ConsoleSize"
+ CloseProcessHandle ProcessModifyOperation = "CloseHandle"
+)
+
+// Standard IO handle(s) to close for [hcsschema.CloseHandle] in [hcsschema.ProcessModifyRequest].
+type STDIOHandle string
+
+const (
+ STDInHandle STDIOHandle = "StdIn"
+ STDOutHandle STDIOHandle = "StdOut"
+ STDErrHandle STDIOHandle = "StdErr"
+ AllHandles STDIOHandle = "All"
+)
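
These are plain JSON-tagged structs, so a guest modify request is just marshalled and sent over the bridge. A sketch of what one serializes to (the resource type and settings here are illustrative values, not constants from this file):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
)

func main() {
	req := guestrequest.ModificationRequest{
		RequestType:  guestrequest.RequestTypeAdd,
		ResourceType: guestrequest.ResourceType("MappedDirectory"), // illustrative
		Settings: map[string]string{
			"HostPath":      `C:\host`,
			"ContainerPath": `C:\guest`,
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"RequestType":"Add","ResourceType":"MappedDirectory","Settings":{...}}
}
```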
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
new file mode 100644
index 000000000..4eb9bb9f1
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
@@ -0,0 +1,92 @@
+package queue
+
+import (
+ "errors"
+ "sync"
+)
+
+var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+
+// MessageQueue represents a thread-safe message queue that messages can be
+// written to and retrieved from.
+type MessageQueue struct {
+ m *sync.RWMutex
+ c *sync.Cond
+ messages []interface{}
+ closed bool
+}
+
+// NewMessageQueue returns a new MessageQueue.
+func NewMessageQueue() *MessageQueue {
+ m := &sync.RWMutex{}
+ return &MessageQueue{
+ m: m,
+ c: sync.NewCond(m),
+ messages: []interface{}{},
+ }
+}
+
+// Enqueue writes `msg` to the queue.
+func (mq *MessageQueue) Enqueue(msg interface{}) error {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ if mq.closed {
+ return ErrQueueClosed
+ }
+ mq.messages = append(mq.messages, msg)
+ // Signal a waiter that there is now a value available in the queue.
+ mq.c.Signal()
+ return nil
+}
+
+// Dequeue will read a value from the queue and remove it. If the queue
+// is empty, this will block until the queue is closed or a value gets enqueued.
+func (mq *MessageQueue) Dequeue() (interface{}, error) {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ for !mq.closed && mq.size() == 0 {
+ mq.c.Wait()
+ }
+
+ // We got woken up, check if it's because the queue got closed.
+ if mq.closed {
+ return nil, ErrQueueClosed
+ }
+
+ val := mq.messages[0]
+ mq.messages[0] = nil
+ mq.messages = mq.messages[1:]
+ return val, nil
+}
+
+// Size returns the size of the queue.
+func (mq *MessageQueue) Size() int {
+ mq.m.RLock()
+ defer mq.m.RUnlock()
+ return mq.size()
+}
+
+// size is the non-exported length check for use inside functions that already hold the lock.
+func (mq *MessageQueue) size() int {
+ return len(mq.messages)
+}
+
+// Close closes the queue for future writes or reads. Any attempts to read or write from the
+// queue after close will return ErrQueueClosed. This is safe to call multiple times.
+func (mq *MessageQueue) Close() {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ // Already closed, noop
+ if mq.closed {
+ return
+ }
+
+ mq.messages = nil
+ mq.closed = true
+ // If there's anybody currently waiting on a value from Dequeue, we need to
+ // broadcast so the read(s) can return ErrQueueClosed.
+ mq.c.Broadcast()
+}
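
A small sketch of the queue's blocking semantics, assuming the exported API above (note that `Close` also drops anything still queued, since it nils out the message slice):

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/queue"
)

func main() {
	mq := queue.NewMessageQueue()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Dequeue blocks until a value arrives or the queue is closed.
		v, err := mq.Dequeue()
		fmt.Println(v, err) // "hello <nil>"
	}()

	_ = mq.Enqueue("hello")
	<-done

	mq.Close()
	_, err := mq.Dequeue()
	fmt.Println(err) // "the queue is closed for reading and writing"
}
```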
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go
new file mode 100644
index 000000000..f211d25e7
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go
@@ -0,0 +1 @@
+package safefile
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
index 66b8d7e03..b087b9879 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package safefile
import (
@@ -156,7 +158,6 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)}
}
-
} else {
parent = newroot
}
@@ -242,7 +243,7 @@ func RemoveRelative(path string, root *os.File) error {
if err == nil {
defer f.Close()
err = deleteOnClose(f)
- if err == syscall.ERROR_ACCESS_DENIED {
+ if err == syscall.ERROR_ACCESS_DENIED { //nolint:errorlint
// Maybe the file is marked readonly. Clear the bit and retry.
_ = clearReadOnly(f)
err = deleteOnClose(f)
@@ -275,7 +276,7 @@ func RemoveAllRelative(path string, root *os.File) error {
}
// It is necessary to use os.Open as Readdirnames does not work with
- // OpenRelative. This is safe because the above lstatrelative fails
+ // OpenRelative. This is safe because the above LstatRelative fails
// if the target is outside the root, and we know this is not a
// symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check.
fd, err := os.Open(filepath.Join(root.Name(), path))
@@ -292,12 +293,12 @@ func RemoveAllRelative(path string, root *os.File) error {
for {
names, err1 := fd.Readdirnames(100)
for _, name := range names {
- err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root)
- if err == nil {
- err = err1
+ if err2 := RemoveAllRelative(path+string(os.PathSeparator)+name, root); err == nil {
+ err = err2
}
}
if err1 == io.EOF {
+ // Readdirnames has no more files to return
break
}
// If Readdirnames returned an error, use it.
@@ -339,6 +340,33 @@ func MkdirRelative(path string, root *os.File) error {
return err
}
+// MkdirAllRelative creates each directory in the path relative to a root, failing if
+// any existing intermediate path components are reparse points.
+func MkdirAllRelative(path string, root *os.File) error {
+ pathParts := strings.Split(filepath.Clean(path), (string)(filepath.Separator))
+ for index := range pathParts {
+ partialPath := filepath.Join(pathParts[0 : index+1]...)
+ stat, err := LstatRelative(partialPath, root)
+
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err := MkdirRelative(partialPath, root); err != nil {
+ return err
+ }
+ continue
+ }
+ return err
+ }
+
+ if !stat.IsDir() {
+ fullPath := filepath.Join(root.Name(), partialPath)
+ return &os.PathError{Op: "mkdir", Path: fullPath, Err: syscall.ENOTDIR}
+ }
+ }
+
+ return nil
+}
+
// LstatRelative performs a stat operation on a file relative to a root, failing
// if any intermediate path components are reparse points.
func LstatRelative(path string, root *os.File) (os.FileInfo, error) {
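
A hedged sketch of how the `*Relative` helpers are meant to be used: every operation is anchored to an open directory handle, so traversal outside the root fails instead of escaping. This is Windows-only (the package builds with `//go:build windows`), and opening the root with `os.Open` here is illustrative:

```go
//go:build windows

package main

import (
	"os"

	"github.com/Microsoft/hcsshim/internal/safefile"
)

func main() {
	// The root stays open for the duration, so intermediate components
	// cannot be swapped for reparse points between check and use.
	root, err := os.Open(`C:\layer`)
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Creates a, a\b, a\b\c under the root, failing if any existing
	// component is a reparse point or a non-directory.
	if err := safefile.MkdirAllRelative(`a\b\c`, root); err != nil {
		panic(err)
	}
}
```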
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
new file mode 100644
index 000000000..7dfa1e594
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
@@ -0,0 +1,186 @@
+//go:build windows
+// +build windows
+
+package security
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+type (
+ accessMask uint32
+ accessMode uint32
+ desiredAccess uint32
+ inheritMode uint32
+ objectType uint32
+ shareMode uint32
+ securityInformation uint32
+ trusteeForm uint32
+ trusteeType uint32
+)
+
+type explicitAccess struct {
+ accessPermissions accessMask
+ accessMode accessMode
+ inheritance inheritMode
+ trustee trustee
+}
+
+type trustee struct {
+ multipleTrustee *trustee
+ multipleTrusteeOperation int32
+ trusteeForm trusteeForm
+ trusteeType trusteeType
+ name uintptr
+}
+
+const (
+ AccessMaskNone accessMask = 0
+ AccessMaskRead accessMask = 1 << 31 // GENERIC_READ
+ AccessMaskWrite accessMask = 1 << 30 // GENERIC_WRITE
+ AccessMaskExecute accessMask = 1 << 29 // GENERIC_EXECUTE
+ AccessMaskAll accessMask = 1 << 28 // GENERIC_ALL
+
+ accessMaskDesiredPermission = AccessMaskRead
+
+ accessModeGrant accessMode = 1
+
+ desiredAccessReadControl desiredAccess = 0x20000
+ desiredAccessWriteDac desiredAccess = 0x40000
+
+ gvmga = "GrantVmGroupAccess:"
+
+ inheritModeNoInheritance inheritMode = 0x0
+ inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
+
+ objectTypeFileObject objectType = 0x1
+
+ securityInformationDACL securityInformation = 0x4
+
+ shareModeRead shareMode = 0x1
+ shareModeWrite shareMode = 0x2
+
+ //nolint:stylecheck // ST1003
+ sidVmGroup = "S-1-5-83-0"
+
+ trusteeFormIsSid trusteeForm = 0
+
+ trusteeTypeWellKnownGroup trusteeType = 5
+)
+
+// GrantVmGroupAccess sets the DACL for a specified file or directory to
+// include Grant ACE entries for the VM Group SID. This is a Go re-
+// implementation of the function of the same name in vmcompute, which is
+// unfortunately not exported in RS5.
+func GrantVmGroupAccess(name string) error { //nolint:stylecheck // ST1003
+ return GrantVmGroupAccessWithMask(name, accessMaskDesiredPermission)
+}
+
+// GrantVmGroupAccessWithMask sets the desired DACL for a specified file or
+// directory.
+func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:stylecheck // ST1003
+ if access == 0 || access<<4 != 0 {
+ return fmt.Errorf("invalid access mask: 0x%08x", access)
+ }
+ // Stat (to determine if `name` is a directory).
+ s, err := os.Stat(name)
+ if err != nil {
+ return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
+ }
+
+ // Get a handle to the file/directory. Must defer Close on success.
+ fd, err := createFile(name, s.IsDir())
+ if err != nil {
+ return err // Already wrapped
+ }
+ defer func() {
+ _ = syscall.CloseHandle(fd)
+ }()
+
+ // Get the current DACL and Security Descriptor. Must defer LocalFree on success.
+ ot := objectTypeFileObject
+ si := securityInformationDACL
+ sd := uintptr(0)
+ origDACL := uintptr(0)
+ if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
+ return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
+ }
+ defer func() {
+ _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
+ }()
+
+ // Generate a new DACL which is the current DACL with the required ACEs added.
+ // Must defer LocalFree on success.
+ newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), access, origDACL)
+ if err != nil {
+ return err // Already wrapped
+ }
+ defer func() {
+ _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
+ }()
+
+ // And finally use SetSecurityInfo to apply the updated DACL.
+ if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
+ return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
+ }
+
+ return nil
+}
+
+// createFile is a helper function to call [Nt]CreateFile to get a handle to
+// the file or directory.
+func createFile(name string, isDir bool) (syscall.Handle, error) {
+ namep, err := syscall.UTF16FromString(name)
+ if err != nil {
+ return 0, fmt.Errorf("syscall.UTF16FromString %s: %w", name, err)
+ }
+ da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
+ sm := uint32(shareModeRead | shareModeWrite)
+ fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
+ if isDir {
+ fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
+ }
+ fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
+ if err != nil {
+ return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
+ }
+ return fd, nil
+}
+
+// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added.
+// The caller is responsible for LocalFree of the returned DACL on success.
+func generateDACLWithAcesAdded(name string, isDir bool, desiredAccess accessMask, origDACL uintptr) (uintptr, error) {
+ // Generate pointers to the SIDs based on the string SIDs
+ sid, err := syscall.StringToSid(sidVmGroup)
+ if err != nil {
+ return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
+ }
+
+ inheritance := inheritModeNoInheritance
+ if isDir {
+ inheritance = inheritModeSubContainersAndObjectsInherit
+ }
+
+ eaArray := []explicitAccess{
+ {
+ accessPermissions: desiredAccess,
+ accessMode: accessModeGrant,
+ inheritance: inheritance,
+ trustee: trustee{
+ trusteeForm: trusteeFormIsSid,
+ trusteeType: trusteeTypeWellKnownGroup,
+ name: uintptr(unsafe.Pointer(sid)),
+ },
+ },
+ }
+
+ modifiedDACL := uintptr(0)
+ if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
+ return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
+ }
+
+ return modifiedDACL, nil
+}
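
Callers typically apply this to a VHD or layer file before handing it to a utility VM, so the VM group account can read it. A minimal, Windows-only sketch (the path is illustrative):

```go
//go:build windows

package main

import (
	"github.com/Microsoft/hcsshim/internal/security"
)

func main() {
	// Adds a read Grant ACE for the VM group SID (S-1-5-83-0)
	// to the file's existing DACL.
	if err := security.GrantVmGroupAccess(`C:\vhds\layer.vhdx`); err != nil {
		panic(err)
	}
}
```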
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go
new file mode 100644
index 000000000..71326e4e4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go
@@ -0,0 +1,7 @@
+package security
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go
+
+//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo
+//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo
+//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
new file mode 100644
index 000000000..395f54687
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
@@ -0,0 +1,69 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package security
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+
+ procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
+ procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
+ procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
+)
+
+func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go
new file mode 100644
index 000000000..9dd00c812
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go
@@ -0,0 +1 @@
+package vmcompute
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go
index e7f114b67..67ca897cf 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package vmcompute
import (
@@ -5,15 +7,17 @@ import (
"syscall"
"time"
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/timeout"
- "go.opencensus.io/trace"
)
-//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go vmcompute.go
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
@@ -62,7 +66,7 @@ type HcsCallback syscall.Handle
type HcsProcessInformation struct {
// ProcessId is the pid of the created process.
ProcessId uint32
- reserved uint32 //nolint:structcheck
+ _ uint32 // reserved padding
// StdInput is the handle associated with the stdin of the process.
StdInput syscall.Handle
// StdOutput is the handle associated with the stdout of the process.
@@ -72,21 +76,39 @@ type HcsProcessInformation struct {
}
func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error {
+ now := time.Now()
if timeout > 0 {
var cancel gcontext.CancelFunc
ctx, cancel = gcontext.WithTimeout(ctx, timeout)
defer cancel()
}
+ // if ctx already has an earlier deadline, the shortest timeout takes precedence and is used.
+ // find the true timeout for reporting
+ //
+ // this is mostly an issue with (*UtilityVM).Start(context.Context), which sets its
+ // own (2 minute) timeout.
+ deadline, ok := ctx.Deadline()
+ trueTimeout := timeout
+ if ok {
+ trueTimeout = deadline.Sub(now)
+ log.G(ctx).WithFields(logrus.Fields{
+ logfields.Timeout: trueTimeout,
+ "desiredTimeout": timeout,
+ }).Trace("Executing syscall with deadline")
+ }
+
done := make(chan error, 1)
go func() {
done <- f()
}()
select {
case <-ctx.Done():
- if ctx.Err() == gcontext.DeadlineExceeded {
- log.G(ctx).WithField(logfields.Timeout, timeout).
- Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
+ if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint
+ log.G(ctx).WithField(logfields.Timeout, trueTimeout).
+ Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " +
+ "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " +
+ "stuck in the platform API for a significant length of time.")
}
return ctx.Err()
case err := <-done:
@@ -95,7 +117,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error
}
func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems")
+ ctx, span := oc.StartSpan(ctx, "HcsEnumerateComputeSystems")
defer span.End()
defer func() {
if result != "" {
@@ -122,13 +144,13 @@ func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSyst
}
func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsCreateComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -147,7 +169,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin
}
func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsOpenComputeSystem")
defer span.End()
defer func() {
if result != "" {
@@ -167,7 +189,7 @@ func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSys
}
func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsCloseComputeSystem")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -177,13 +199,13 @@ func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr er
}
func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsStartComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -200,13 +222,13 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option
}
func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsShutdownComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -223,13 +245,13 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt
}
func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsTerminateComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -246,13 +268,13 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op
}
func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsPauseComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -269,13 +291,13 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option
}
func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsResumeComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
@@ -292,7 +314,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio
}
func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsGetComputeSystemProperties")
+ ctx, span := oc.StartSpan(ctx, "HcsGetComputeSystemProperties")
defer span.End()
defer func() {
if result != "" {
@@ -319,7 +341,7 @@ func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem
}
func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsModifyComputeSystem")
defer span.End()
defer func() {
if result != "" {
@@ -340,7 +362,7 @@ func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, confi
}
func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings")
+ ctx, span := oc.StartSpan(ctx, "HcsModifyServiceSettings")
defer span.End()
defer func() {
if result != "" {
@@ -361,7 +383,7 @@ func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result str
}
func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback")
+ ctx, span := oc.StartSpan(ctx, "HcsRegisterComputeSystemCallback")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -371,7 +393,7 @@ func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSys
}
func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback")
+ ctx, span := oc.StartSpan(ctx, "HcsUnregisterComputeSystemCallback")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -381,7 +403,7 @@ func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle Hcs
}
func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsCreateProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsCreateProcess")
defer span.End()
defer func() {
if result != "" {
@@ -389,7 +411,12 @@ func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processPara
}
oc.SetSpanStatus(span, hr)
}()
- span.AddAttributes(trace.StringAttribute("processParameters", processParameters))
+ if span.IsRecordingEvents() {
+ // wont handle v1 process parameters
+ if s, err := log.ScrubProcessParameters(processParameters); err == nil {
+ span.AddAttributes(trace.StringAttribute("processParameters", s))
+ }
+ }
return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error {
var resultp *uint16
@@ -402,7 +429,7 @@ func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processPara
}
func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsOpenProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsOpenProcess")
defer span.End()
defer func() {
if result != "" {
@@ -423,7 +450,7 @@ func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (
}
func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsCloseProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsCloseProcess")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -433,7 +460,7 @@ func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) {
}
func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsTerminateProcess")
defer span.End()
defer func() {
if result != "" {
@@ -453,7 +480,7 @@ func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result strin
}
func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsSignalProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsSignalProcess")
defer span.End()
defer func() {
if result != "" {
@@ -474,7 +501,7 @@ func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string)
}
func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo")
+ ctx, span := oc.StartSpan(ctx, "HcsGetProcessInfo")
defer span.End()
defer func() {
if result != "" {
@@ -494,7 +521,7 @@ func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInforma
}
func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties")
+ ctx, span := oc.StartSpan(ctx, "HcsGetProcessProperties")
defer span.End()
defer func() {
if result != "" {
@@ -520,7 +547,7 @@ func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processP
}
func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsModifyProcess")
+ ctx, span := oc.StartSpan(ctx, "HcsModifyProcess")
defer span.End()
defer func() {
if result != "" {
@@ -541,7 +568,7 @@ func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string)
}
func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties")
+ ctx, span := oc.StartSpan(ctx, "HcsGetServiceProperties")
defer span.End()
defer func() {
if result != "" {
@@ -568,7 +595,7 @@ func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (proper
}
func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback")
+ ctx, span := oc.StartSpan(ctx, "HcsRegisterProcessCallback")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -578,7 +605,7 @@ func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callba
}
func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback")
+ ctx, span := oc.StartSpan(ctx, "HcsUnregisterProcessCallback")
defer span.End()
defer func() { oc.SetSpanStatus(span, hr) }()
@@ -588,13 +615,13 @@ func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallba
}
func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
- ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem")
+ ctx, span := oc.StartSpan(ctx, "HcsSaveComputeSystem")
defer span.End()
defer func() {
if result != "" {
span.AddAttributes(trace.StringAttribute("result", result))
}
- if hr != errVmcomputeOperationPending {
+ if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
oc.SetSpanStatus(span, hr)
}
}()
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
index cae55058d..67779de50 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package vmcompute
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,61 +29,65 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
var (
modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
- procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems")
- procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem")
- procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem")
procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem")
- procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem")
- procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem")
- procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem")
- procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem")
- procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem")
+ procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
+ procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem")
+ procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess")
+ procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems")
procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties")
+ procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
+ procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
+ procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem")
+ procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings")
+ procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem")
+ procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess")
+ procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem")
procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback")
- procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback")
+ procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
+ procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem")
procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem")
- procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess")
- procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess")
- procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
- procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess")
+ procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem")
procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess")
- procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
- procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
- procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
- procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
- procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
+ procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem")
+ procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem")
+ procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess")
+ procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback")
procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
)
-func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(query)
+func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) {
+ hr = procHcsCloseComputeSystem.Find()
if hr != nil {
return
}
- return _hcsEnumerateComputeSystems(_p0, computeSystems, result)
+ r0, _, _ := syscall.SyscallN(procHcsCloseComputeSystem.Addr(), uintptr(computeSystem))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
}
-func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) {
- if hr = procHcsEnumerateComputeSystems.Find(); hr != nil {
+func hcsCloseProcess(process HcsProcess) (hr error) {
+ hr = procHcsCloseProcess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsCloseProcess.Addr(), uintptr(process))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -105,10 +112,11 @@ func hcsCreateComputeSystem(id string, configuration string, identity syscall.Ha
}
func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) {
- if hr = procHcsCreateComputeSystem.Find(); hr != nil {
+ hr = procHcsCreateComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procHcsCreateComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -118,34 +126,21 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall
return
}
-func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) {
+func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(id)
+ _p0, hr = syscall.UTF16PtrFromString(processParameters)
if hr != nil {
return
}
- return _hcsOpenComputeSystem(_p0, computeSystem, result)
-}
-
-func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) {
- if hr = procHcsOpenComputeSystem.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
- if int32(r0) < 0 {
- if r0&0x1fff0000 == 0x00070000 {
- r0 &= 0xffff
- }
- hr = syscall.Errno(r0)
- }
- return
+ return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result)
}
-func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) {
- if hr = procHcsCloseComputeSystem.Find(); hr != nil {
+func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) {
+ hr = procHcsCreateProcess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsCreateProcess.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -155,20 +150,21 @@ func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) {
return
}
-func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(options)
+ _p0, hr = syscall.UTF16PtrFromString(query)
if hr != nil {
return
}
- return _hcsStartComputeSystem(computeSystem, _p0, result)
+ return _hcsEnumerateComputeSystems(_p0, computeSystems, result)
}
-func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsStartComputeSystem.Find(); hr != nil {
+func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) {
+ hr = procHcsEnumerateComputeSystems.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsEnumerateComputeSystems.Addr(), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -178,20 +174,21 @@ func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **u
return
}
-func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(options)
+ _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
if hr != nil {
return
}
- return _hcsShutdownComputeSystem(computeSystem, _p0, result)
+ return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result)
}
-func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsShutdownComputeSystem.Find(); hr != nil {
+func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
+ hr = procHcsGetComputeSystemProperties.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsGetComputeSystemProperties.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -201,20 +198,12 @@ func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result
return
}
-func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(options)
+func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) {
+ hr = procHcsGetProcessInfo.Find()
if hr != nil {
return
}
- return _hcsTerminateComputeSystem(computeSystem, _p0, result)
-}
-
-func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsTerminateComputeSystem.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsGetProcessInfo.Addr(), uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -224,20 +213,12 @@ func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result
return
}
-func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(options)
+func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) {
+ hr = procHcsGetProcessProperties.Find()
if hr != nil {
return
}
- return _hcsPauseComputeSystem(computeSystem, _p0, result)
-}
-
-func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsPauseComputeSystem.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsGetProcessProperties.Addr(), uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -247,20 +228,21 @@ func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **u
return
}
-func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(options)
+ _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
if hr != nil {
return
}
- return _hcsResumeComputeSystem(computeSystem, _p0, result)
+ return _hcsGetServiceProperties(_p0, properties, result)
}
-func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsResumeComputeSystem.Find(); hr != nil {
+func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
+ hr = procHcsGetServiceProperties.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsGetServiceProperties.Addr(), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -270,20 +252,21 @@ func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **
return
}
-func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) {
+func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
+ _p0, hr = syscall.UTF16PtrFromString(configuration)
if hr != nil {
return
}
- return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result)
+ return _hcsModifyComputeSystem(computeSystem, _p0, result)
}
-func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
- if hr = procHcsGetComputeSystemProperties.Find(); hr != nil {
+func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) {
+ hr = procHcsModifyComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsModifyComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -293,20 +276,21 @@ func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint
return
}
-func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) {
+func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(configuration)
+ _p0, hr = syscall.UTF16PtrFromString(settings)
if hr != nil {
return
}
- return _hcsModifyComputeSystem(computeSystem, _p0, result)
+ return _hcsModifyProcess(process, _p0, result)
}
-func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) {
- if hr = procHcsModifyComputeSystem.Find(); hr != nil {
+func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) {
+ hr = procHcsModifyProcess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsModifyProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -326,10 +310,11 @@ func hcsModifyServiceSettings(settings string, result **uint16) (hr error) {
}
func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) {
- if hr = procHcsModifyServiceSettings.Find(); hr != nil {
+ hr = procHcsModifyServiceSettings.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procHcsModifyServiceSettings.Addr(), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -339,11 +324,21 @@ func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) {
return
}
-func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
- if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil {
+func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _hcsOpenComputeSystem(_p0, computeSystem, result)
+}
+
+func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) {
+ hr = procHcsOpenComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsOpenComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -353,11 +348,12 @@ func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr,
return
}
-func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) {
- if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil {
+func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) {
+ hr = procHcsOpenProcess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsOpenProcess.Addr(), uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -367,20 +363,21 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) {
return
}
-func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
- return _hcsSaveComputeSystem(computeSystem, _p0, result)
+ return _hcsPauseComputeSystem(computeSystem, _p0, result)
}
-func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
- if hr = procHcsSaveComputeSystem.Find(); hr != nil {
+func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsPauseComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsPauseComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -390,20 +387,12 @@ func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **ui
return
}
-func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(processParameters)
+func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
+ hr = procHcsRegisterComputeSystemCallback.Find()
if hr != nil {
return
}
- return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result)
-}
-
-func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) {
- if hr = procHcsCreateProcess.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procHcsRegisterComputeSystemCallback.Addr(), uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -413,11 +402,12 @@ func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, proce
return
}
-func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) {
- if hr = procHcsOpenProcess.Find(); hr != nil {
+func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
+ hr = procHcsRegisterProcessCallback.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsRegisterProcessCallback.Addr(), uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -427,11 +417,21 @@ func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, re
return
}
-func hcsCloseProcess(process HcsProcess) (hr error) {
- if hr = procHcsCloseProcess.Find(); hr != nil {
+func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
+ return _hcsResumeComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsResumeComputeSystem.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procHcsResumeComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -441,11 +441,21 @@ func hcsCloseProcess(process HcsProcess) (hr error) {
return
}
-func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) {
- if hr = procHcsTerminateProcess.Find(); hr != nil {
+func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsSaveComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsSaveComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procHcsSaveComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -455,20 +465,21 @@ func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) {
return
}
-func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) {
+func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
- return _hcsSignalProcess(process, _p0, result)
+ return _hcsShutdownComputeSystem(computeSystem, _p0, result)
}
-func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) {
- if hr = procHcsSignalProcess.Find(); hr != nil {
+func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsShutdownComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsShutdownComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -478,11 +489,21 @@ func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr
return
}
-func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) {
- if hr = procHcsGetProcessInfo.Find(); hr != nil {
+func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsSignalProcess(process, _p0, result)
+}
+
+func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) {
+ hr = procHcsSignalProcess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsSignalProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -492,11 +513,21 @@ func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInforma
return
}
-func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) {
- if hr = procHcsGetProcessProperties.Find(); hr != nil {
+func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsStartComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsStartComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsStartComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -506,20 +537,21 @@ func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, res
return
}
-func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) {
+func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(settings)
+ _p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
- return _hcsModifyProcess(process, _p0, result)
+ return _hcsTerminateComputeSystem(computeSystem, _p0, result)
}
-func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) {
- if hr = procHcsModifyProcess.Find(); hr != nil {
+func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) {
+ hr = procHcsTerminateComputeSystem.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsTerminateComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -529,20 +561,12 @@ func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (h
return
}
-func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
+func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) {
+ hr = procHcsTerminateProcess.Find()
if hr != nil {
return
}
- return _hcsGetServiceProperties(_p0, properties, result)
-}
-
-func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
- if hr = procHcsGetServiceProperties.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procHcsTerminateProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -552,11 +576,12 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result
return
}
-func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
- if hr = procHcsRegisterProcessCallback.Find(); hr != nil {
+func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) {
+ hr = procHcsUnregisterComputeSystemCallback.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsUnregisterComputeSystemCallback.Addr(), uintptr(callbackHandle))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -567,10 +592,11 @@ func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context ui
}
func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) {
- if hr = procHcsUnregisterProcessCallback.Find(); hr != nil {
+ hr = procHcsUnregisterProcessCallback.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procHcsUnregisterProcessCallback.Addr(), uintptr(callbackHandle))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
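
Every generated binding above follows one calling convention, and the mechanical change in this hunk is the move from the fixed-arity syscall.Syscall/Syscall6 helpers (which needed zero padding) to the variadic syscall.SyscallN added in Go 1.18. A minimal, self-contained sketch of that convention follows; the package and helper names are illustrative, and only the HcsEnumerateComputeSystems export already named in the diff is used.

```go
//go:build windows

// Package hcsexample is an illustrative stand-in; it is not part of the diff.
package hcsexample

import (
	"syscall"
	"unsafe"
)

var (
	modvmcompute  = syscall.NewLazyDLL("vmcompute.dll")
	procEnumerate = modvmcompute.NewProc("HcsEnumerateComputeSystems")
)

// enumerate mirrors the generated bindings: resolve the export lazily with
// Find, invoke it with SyscallN, and translate a failing HRESULT into an
// error returned to the caller.
func enumerate(query *uint16, computeSystems, result **uint16) (hr error) {
	hr = procEnumerate.Find()
	if hr != nil {
		return // vmcompute.dll or the export is unavailable
	}
	r0, _, _ := syscall.SyscallN(procEnumerate.Addr(),
		uintptr(unsafe.Pointer(query)),
		uintptr(unsafe.Pointer(computeSystems)),
		uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 { // the HRESULT severity bit signals failure
		// An HRESULT wrapping a Win32 error carries FACILITY_WIN32 (7);
		// strip the wrapper so hr surfaces as the plain syscall.Errno.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
```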
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
index ff81ac2c1..e12253c94 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -14,14 +16,14 @@ import (
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::ActivateLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
err = activateLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
deleted file mode 100644
index 3ec708d1e..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package wclayer
-
-import (
- "context"
- "errors"
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/Microsoft/go-winio"
- "github.com/Microsoft/hcsshim/internal/hcserror"
- "github.com/Microsoft/hcsshim/internal/oc"
- "github.com/Microsoft/hcsshim/internal/safefile"
- "github.com/Microsoft/hcsshim/internal/winapi"
- "go.opencensus.io/trace"
-)
-
-type baseLayerWriter struct {
- ctx context.Context
- s *trace.Span
-
- root *os.File
- f *os.File
- bw *winio.BackupFileWriter
- err error
- hasUtilityVM bool
- dirInfo []dirInfo
-}
-
-type dirInfo struct {
- path string
- fileInfo winio.FileBasicInfo
-}
-
-// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
-// after processing of the directory tree has completed. The times are expected
-// to be ordered such that parent directories come before child directories.
-func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
- for i := range dis {
- di := &dis[len(dis)-i-1] // reverse order: process child directories first
- f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT)
- if err != nil {
- return err
- }
-
- err = winio.SetFileBasicInfo(f, &di.fileInfo)
- f.Close()
- if err != nil {
- return err
- }
-
- }
- return nil
-}
-
-func (w *baseLayerWriter) closeCurrentFile() error {
- if w.f != nil {
- err := w.bw.Close()
- err2 := w.f.Close()
- w.f = nil
- w.bw = nil
- if err != nil {
- return err
- }
- if err2 != nil {
- return err2
- }
- }
- return nil
-}
-
-func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
- defer func() {
- if err != nil {
- w.err = err
- }
- }()
-
- err = w.closeCurrentFile()
- if err != nil {
- return err
- }
-
- if filepath.ToSlash(name) == `UtilityVM/Files` {
- w.hasUtilityVM = true
- }
-
- var f *os.File
- defer func() {
- if f != nil {
- f.Close()
- }
- }()
-
- extraFlags := uint32(0)
- if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
- extraFlags |= winapi.FILE_DIRECTORY_FILE
- w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
- }
-
- mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
- f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags)
- if err != nil {
- return hcserror.New(err, "Failed to safefile.OpenRelative", name)
- }
-
- err = winio.SetFileBasicInfo(f, fileInfo)
- if err != nil {
- return hcserror.New(err, "Failed to SetFileBasicInfo", name)
- }
-
- w.f = f
- w.bw = winio.NewBackupFileWriter(f, true)
- f = nil
- return nil
-}
-
-func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
- defer func() {
- if err != nil {
- w.err = err
- }
- }()
-
- err = w.closeCurrentFile()
- if err != nil {
- return err
- }
-
- return safefile.LinkRelative(target, w.root, name, w.root)
-}
-
-func (w *baseLayerWriter) Remove(name string) error {
- return errors.New("base layer cannot have tombstones")
-}
-
-func (w *baseLayerWriter) Write(b []byte) (int, error) {
- n, err := w.bw.Write(b)
- if err != nil {
- w.err = err
- }
- return n, err
-}
-
-func (w *baseLayerWriter) Close() (err error) {
- defer w.s.End()
- defer func() { oc.SetSpanStatus(w.s, err) }()
- defer func() {
- w.root.Close()
- w.root = nil
- }()
-
- err = w.closeCurrentFile()
- if err != nil {
- return err
- }
- if w.err == nil {
- // Restore the file times of all the directories, since they may have
- // been modified by creating child directories.
- err = reapplyDirectoryTimes(w.root, w.dirInfo)
- if err != nil {
- return err
- }
-
- err = ProcessBaseLayer(w.ctx, w.root.Name())
- if err != nil {
- return err
- }
-
- if w.hasUtilityVM {
- err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root)
- if err != nil {
- return err
- }
- err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM"))
- if err != nil {
- return err
- }
- }
- }
- return w.err
-}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go
new file mode 100644
index 000000000..807b7de1f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go
@@ -0,0 +1,218 @@
+//go:build windows
+
+package wclayer
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/Microsoft/hcsshim/internal/longpath"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+type baseLayerReader struct {
+ s *trace.Span
+ root string
+ result chan *fileEntry
+ proceed chan bool
+ currentFile *os.File
+ backupReader *winio.BackupFileReader
+}
+
+func newBaseLayerReader(root string, s *trace.Span) (r *baseLayerReader) {
+ r = &baseLayerReader{
+ s: s,
+ root: root,
+ result: make(chan *fileEntry),
+ proceed: make(chan bool),
+ }
+ go r.walk()
+ return r
+}
+
+func (r *baseLayerReader) walkUntilCancelled() error {
+ root, err := longpath.LongAbs(r.root)
+ if err != nil {
+ return err
+ }
+
+ r.root = root
+
+ err = filepath.Walk(filepath.Join(r.root, filesPath), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
+ // Handle a failure that may stem from a golang bug in the conversion of
+ // UTF16 to UTF8 for files left in the recycle bin: os.Lstat, which is
+ // called by filepath.Walk, fails when a filename contains unicode
+ // characters. Skipping the recycle bin is desirable regardless.
+ if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() {
+ return filepath.SkipDir
+ }
+
+ r.result <- &fileEntry{path, info, nil}
+ if !<-r.proceed {
+ return errorIterationCanceled
+ }
+
+ return nil
+ })
+
+ if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ utilityVMAbsPath := filepath.Join(r.root, UtilityVMPath)
+ utilityVMFilesAbsPath := filepath.Join(r.root, UtilityVMFilesPath)
+
+ // Ignore a UtilityVM without Files; that's not _really_ a UtilityVM
+ if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil {
+ if os.IsNotExist(err) {
+ return io.EOF
+ }
+ return err
+ }
+
+ err = filepath.Walk(utilityVMAbsPath, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if path != utilityVMAbsPath && path != utilityVMFilesAbsPath && !hasPathPrefix(path, utilityVMFilesAbsPath) {
+ if info.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ r.result <- &fileEntry{path, info, nil}
+ if !<-r.proceed {
+ return errorIterationCanceled
+ }
+
+ return nil
+ })
+
+ if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ return io.EOF
+}
+
+func (r *baseLayerReader) walk() {
+ defer close(r.result)
+ if !<-r.proceed {
+ return
+ }
+
+ err := r.walkUntilCancelled()
+ if err != nil {
+ for {
+ r.result <- &fileEntry{err: err}
+ if !<-r.proceed {
+ return
+ }
+ }
+ }
+}
+
+func (r *baseLayerReader) reset() {
+ if r.backupReader != nil {
+ r.backupReader.Close()
+ r.backupReader = nil
+ }
+ if r.currentFile != nil {
+ r.currentFile.Close()
+ r.currentFile = nil
+ }
+}
+
+func (r *baseLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+ r.reset()
+ r.proceed <- true
+ fe := <-r.result
+ if fe == nil {
+ err = errors.New("BaseLayerReader closed")
+ return
+ }
+ if fe.err != nil {
+ err = fe.err
+ return
+ }
+
+ path, err = filepath.Rel(r.root, fe.path)
+ if err != nil {
+ return
+ }
+
+ f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ }()
+
+ fileInfo, err = winio.GetFileBasicInfo(f)
+ if err != nil {
+ return
+ }
+
+ size = fe.fi.Size()
+ r.backupReader = winio.NewBackupFileReader(f, true)
+
+ r.currentFile = f
+ f = nil
+ return
+}
+
+func (r *baseLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) {
+ fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile)
+ if err != nil {
+ return 0, nil, err
+ }
+ fileIDInfo, err := winio.GetFileID(r.currentFile)
+ if err != nil {
+ return 0, nil, err
+ }
+ return fileStandardInfo.NumberOfLinks, fileIDInfo, nil
+}
+
+func (r *baseLayerReader) Read(b []byte) (int, error) {
+ if r.backupReader == nil {
+ return 0, io.EOF
+ }
+ return r.backupReader.Read(b)
+}
+
+func (r *baseLayerReader) Close() (err error) {
+ defer r.s.End()
+ defer func() {
+ oc.SetSpanStatus(r.s, err)
+ close(r.proceed)
+ }()
+ r.proceed <- false
+ // The r.result channel will be closed once walk() returns
+ <-r.result
+ r.reset()
+ return nil
+}
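
The reader added above runs its filesystem walk on a dedicated goroutine and drives it from Next/Close through two unbuffered channels: result delivers one entry per step, and proceed tells the walker whether to keep going. Below is a self-contained sketch of that handshake under illustrative names; like the vendored walk(), it keeps re-sending the terminal error (io.EOF here) until the consumer cancels, and, matching the vendored reader, Next must not be called after Close.

```go
package iterexample

import "io"

type entry struct {
	path string
	err  error
}

type iter struct {
	result  chan *entry
	proceed chan bool
}

func newIter(paths []string) *iter {
	it := &iter{result: make(chan *entry), proceed: make(chan bool)}
	go func() {
		defer close(it.result)
		if !<-it.proceed { // wait for the first Next or Close
			return
		}
		for _, p := range paths {
			it.result <- &entry{path: p}
			if !<-it.proceed { // Close cancels the walk
				return
			}
		}
		for { // keep answering with EOF until Close, as walk() does with errors
			it.result <- &entry{err: io.EOF}
			if !<-it.proceed {
				return
			}
		}
	}()
	return it
}

// Next requests one more entry from the producer goroutine.
func (it *iter) Next() (string, error) {
	it.proceed <- true
	e := <-it.result
	if e == nil {
		return "", io.EOF
	}
	return e.path, e.err
}

// Close stops the producer and waits for it to finish.
func (it *iter) Close() {
	defer close(it.proceed)
	it.proceed <- false
	<-it.result // result closes once the goroutine returns
}
```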
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go
new file mode 100644
index 000000000..aea8b421e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go
@@ -0,0 +1,183 @@
+//go:build windows
+
+package wclayer
+
+import (
+ "context"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "github.com/Microsoft/hcsshim/internal/safefile"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "go.opencensus.io/trace"
+)
+
+type baseLayerWriter struct {
+ ctx context.Context
+ s *trace.Span
+
+ root *os.File
+ f *os.File
+ bw *winio.BackupFileWriter
+ err error
+ hasUtilityVM bool
+ dirInfo []dirInfo
+}
+
+type dirInfo struct {
+ path string
+ fileInfo winio.FileBasicInfo
+}
+
+// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
+// after processing of the directory tree has completed. The times are expected
+// to be ordered such that parent directories come before child directories.
+func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
+ for i := range dis {
+ di := &dis[len(dis)-i-1] // reverse order: process child directories first
+ f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT)
+ if err != nil {
+ return err
+ }
+
+ err = winio.SetFileBasicInfo(f, &di.fileInfo)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *baseLayerWriter) closeCurrentFile() error {
+ if w.f != nil {
+ err := w.bw.Close()
+ err2 := w.f.Close()
+ w.f = nil
+ w.bw = nil
+ if err != nil {
+ return err
+ }
+ if err2 != nil {
+ return err2
+ }
+ }
+ return nil
+}
+
+func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
+ defer func() {
+ if err != nil {
+ w.err = err
+ }
+ }()
+
+ err = w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+
+ if filepath.ToSlash(name) == `UtilityVM/Files` {
+ w.hasUtilityVM = true
+ }
+
+ var f *os.File
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ }()
+
+ extraFlags := uint32(0)
+ if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ extraFlags |= winapi.FILE_DIRECTORY_FILE
+ w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
+ }
+
+ mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
+ f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags)
+ if err != nil {
+ return hcserror.New(err, "Failed to safefile.OpenRelative", name)
+ }
+
+ err = winio.SetFileBasicInfo(f, fileInfo)
+ if err != nil {
+ return hcserror.New(err, "Failed to SetFileBasicInfo", name)
+ }
+
+ w.f = f
+ w.bw = winio.NewBackupFileWriter(f, true)
+ f = nil
+ return nil
+}
+
+func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
+ defer func() {
+ if err != nil {
+ w.err = err
+ }
+ }()
+
+ err = w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+
+ return safefile.LinkRelative(target, w.root, name, w.root)
+}
+
+func (w *baseLayerWriter) Remove(name string) error {
+ return errors.New("base layer cannot have tombstones")
+}
+
+func (w *baseLayerWriter) Write(b []byte) (int, error) {
+ n, err := w.bw.Write(b)
+ if err != nil {
+ w.err = err
+ }
+ return n, err
+}
+
+func (w *baseLayerWriter) Close() (err error) {
+ defer w.s.End()
+ defer func() { oc.SetSpanStatus(w.s, err) }()
+ defer func() {
+ w.root.Close()
+ w.root = nil
+ }()
+
+ err = w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+ if w.err == nil {
+ // Restore the file times of all the directories, since they may have
+ // been modified by creating child directories.
+ err = reapplyDirectoryTimes(w.root, w.dirInfo)
+ if err != nil {
+ return err
+ }
+
+ err = ProcessBaseLayer(w.ctx, w.root.Name())
+ if err != nil {
+ return err
+ }
+
+ if w.hasUtilityVM {
+ err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root)
+ if err != nil {
+ return err
+ }
+ err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM"))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return w.err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go
new file mode 100644
index 000000000..d25c3c520
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go
@@ -0,0 +1,159 @@
+//go:build windows
+
+package wclayer
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/longpath"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "github.com/Microsoft/hcsshim/internal/safefile"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "github.com/pkg/errors"
+ "go.opencensus.io/trace"
+ "golang.org/x/sys/windows"
+)
+
+var hiveNames = []string{"DEFAULT", "SAM", "SECURITY", "SOFTWARE", "SYSTEM"}
+
+// Ensure the given file exists as an ordinary file, and create a minimal hive file if not.
+func ensureHive(path string, root *os.File) (err error) {
+ _, err = safefile.LstatRelative(path, root)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("accessing %s: %w", path, err)
+ }
+
+ version := windows.RtlGetVersion()
+ if version == nil {
+ return fmt.Errorf("failed to get OS version")
+ }
+
+ var fullPath string
+ fullPath, err = longpath.LongAbs(filepath.Join(root.Name(), path))
+ if err != nil {
+ return fmt.Errorf("getting path: %w", err)
+ }
+
+ var key winapi.ORHKey
+ err = winapi.ORCreateHive(&key)
+ if err != nil {
+ return fmt.Errorf("creating hive: %w", err)
+ }
+
+ defer func() {
+ closeErr := winapi.ORCloseHive(key)
+ if closeErr != nil && err == nil {
+ err = fmt.Errorf("closing hive key: %w", closeErr)
+ }
+ }()
+
+ err = winapi.ORSaveHive(key, fullPath, version.MajorVersion, version.MinorVersion)
+ if err != nil {
+ return fmt.Errorf("saving hive: %w", err)
+ }
+
+ return nil
+}
+
+func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) {
+ // The base layer registry hives will be copied from here
+ const hiveSourcePath = "Files\\Windows\\System32\\config"
+ if err = safefile.MkdirAllRelative(hiveSourcePath, root); err != nil {
+ return
+ }
+
+ for _, hiveName := range hiveNames {
+ hivePath := filepath.Join(hiveSourcePath, hiveName)
+ if err = ensureHive(hivePath, root); err != nil {
+ return
+ }
+ }
+
+ stat, err := safefile.LstatRelative(UtilityVMFilesPath, root)
+
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+
+ if err != nil {
+ return
+ }
+
+ if !stat.Mode().IsDir() {
+ fullPath := filepath.Join(root.Name(), UtilityVMFilesPath)
+ return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String())
+ }
+
+ const bcdRelativePath = "EFI\\Microsoft\\Boot\\BCD"
+
+ // Just check that this exists as a regular file. If it exists but is not a valid registry hive,
+ // ProcessUtilityVMImage will complain:
+ // "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry."
+ bcdPath := filepath.Join(UtilityVMFilesPath, bcdRelativePath)
+
+ stat, err = safefile.LstatRelative(bcdPath, root)
+ if err != nil {
+ return false, errors.Wrapf(err, "UtilityVM must contain '%s'", bcdRelativePath)
+ }
+
+ if !stat.Mode().IsRegular() {
+ fullPath := filepath.Join(root.Name(), bcdPath)
+ return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String())
+ }
+
+ return true, nil
+}
+
+func convertToBaseLayer(ctx context.Context, root *os.File) error {
+ hasUtilityVM, err := ensureBaseLayer(root)
+
+ if err != nil {
+ return err
+ }
+
+ if err := ProcessBaseLayer(ctx, root.Name()); err != nil {
+ return err
+ }
+
+ if !hasUtilityVM {
+ return nil
+ }
+
+ err = safefile.EnsureNotReparsePointRelative(UtilityVMPath, root)
+ if err != nil {
+ return err
+ }
+
+ utilityVMPath := filepath.Join(root.Name(), UtilityVMPath)
+ return ProcessUtilityVMImage(ctx, utilityVMPath)
+}
+
+// ConvertToBaseLayer processes a candidate base layer, i.e. a directory
+// containing the desired file content under Files/, and optionally the
+// desired file content for a UtilityVM under UtilityVM/Files/
+func ConvertToBaseLayer(ctx context.Context, path string) (err error) {
+ title := "hcsshim::ConvertToBaseLayer"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
+
+ root, err := safefile.OpenRoot(path)
+ if err != nil {
+ return hcserror.New(err, title+" - failed", "")
+ }
+ defer func() {
+ if err2 := root.Close(); err == nil && err2 != nil {
+ err = hcserror.New(err2, title+" - failed", "")
+ }
+ }()
+
+ if err = convertToBaseLayer(ctx, root); err != nil {
+ return hcserror.New(err, title+" - failed", "")
+ }
+ return nil
+}
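
For orientation, a hypothetical caller of the new entry point might look like the sketch below. Two hedges apply: wclayer is an internal package, so a real caller would live inside the hcsshim module itself, and the layer path is invented for illustration.

```go
//go:build windows

package main

import (
	"context"
	"log"

	// Internal import: only code inside hcsshim itself may compile this.
	"github.com/Microsoft/hcsshim/internal/wclayer"
)

func main() {
	// Hypothetical candidate base layer: file content under Files\, and
	// optionally a UtilityVM under UtilityVM\Files\. Missing registry hives
	// beneath Files\Windows\System32\config are created as minimal hives
	// before HCS processes the layer.
	if err := wclayer.ConvertToBaseLayer(context.Background(), `C:\layers\base`); err != nil {
		log.Fatal(err)
	}
}
```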
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
index ffee31ab1..932475723 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,7 +14,7 @@ import (
// the parent layer provided.
func CreateLayer(ctx context.Context, path, parent string) (err error) {
title := "hcsshim::CreateLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -21,7 +23,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) {
err = createLayer(&stdDriverInfo, path, parent)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
index 5a3809ae2..5c9d5d250 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -13,7 +15,7 @@ import (
// This requires the full list of paths to all parent layers up to the base
func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::CreateScratchLayer"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -28,7 +30,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str
err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
index d5bf2f5bd..e3bc77cbc 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -11,7 +13,7 @@ import (
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DeactivateLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
index 787054e79..d0a59efe1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,14 +14,14 @@ import (
// path, including that layer's containing folder, if any.
func DestroyLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DestroyLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
err = destroyLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go
new file mode 100644
index 000000000..dd1d55580
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go
@@ -0,0 +1,4 @@
+// Package wclayer provides bindings to HCS's legacy layer management API and
+// provides a higher level interface around these calls for container layer
+// management.
+package wclayer
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
index 22f7605be..35fcbedb3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -9,14 +11,13 @@ import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/oc"
- "github.com/Microsoft/hcsshim/osversion"
"go.opencensus.io/trace"
)
// ExpandScratchSize expands the size of a layer to at least size bytes.
func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) {
title := "hcsshim::ExpandScratchSize"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -25,17 +26,20 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
- // Manually expand the volume now in order to work around bugs in 19H1 and
- // prerelease versions of Vb. Remove once this is fixed in Windows.
- if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 {
- err = expandSandboxVolume(ctx, path)
- if err != nil {
- return err
- }
+ // Always expand the volume too. For legacy layers, skipping the expansion
+ // here would still work because the PrepareLayer call handles it internally.
+ // In other cases (like CimFS), however, PrepareLayer is never called, so the
+ // volume would otherwise never be expanded. Legacy layers therefore pay a
+ // small perf hit (the VHD is mounted twice for expansion: once here and once
+ // during PrepareLayer), but as long as that hit is minimal, we should be okay.
+ err = expandSandboxVolume(ctx, path)
+ if err != nil {
+ return err
}
+
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
index 09f0de1a4..d4c677aab 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
@@ -1,8 +1,9 @@
+//go:build windows
+
package wclayer
import (
"context"
- "io/ioutil"
"os"
"strings"
@@ -19,7 +20,7 @@ import (
// perform the export.
func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ExportLayer"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -35,14 +36,21 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
+// LayerReader is an interface that supports reading an existing container image layer.
type LayerReader interface {
+ // Next advances to the next file and returns the name, size, and file info.
Next() (string, int64, *winio.FileBasicInfo, error)
+ // LinkInfo returns the number of links and the file identifier for the current file.
+ LinkInfo() (uint32, *winio.FileIDInfo, error)
+ // Read reads data from the current file, in the format of a Win32 backup stream, and
+ // returns the number of bytes read.
Read(b []byte) (int, error)
+ // Close finishes the layer reading process and releases any resources.
Close() error
}
@@ -50,7 +58,7 @@ type LayerReader interface {
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) {
- ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader")
+ ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerReader")
defer func() {
if err != nil {
oc.SetSpanStatus(span, err)
@@ -61,7 +69,12 @@ func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string)
trace.StringAttribute("path", path),
trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
- exportPath, err := ioutil.TempDir("", "hcs")
+ if len(parentLayerPaths) == 0 {
+ // This is a base layer. It gets exported differently.
+ return newBaseLayerReader(path, span), nil
+ }
+
+ exportPath, err := os.MkdirTemp("", "hcs")
if err != nil {
return nil, err
}
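
The newly documented LayerReader contract can be exercised with a small drain loop: Next yields per-file metadata until io.EOF, and Read streams the current file's Win32 backup stream. A sketch under the same internal-import caveat as above, with error handling kept minimal:

```go
//go:build windows

package wclayerexample

import (
	"fmt"
	"io"

	"github.com/Microsoft/hcsshim/internal/wclayer"
)

// drainLayer visits every file in the layer. Each call to Next positions the
// reader on one file; Read then returns that file's backup stream until EOF.
func drainLayer(r wclayer.LayerReader) error {
	for {
		name, size, _, err := r.Next()
		if err == io.EOF {
			break // all files consumed
		}
		if err != nil {
			return err
		}
		n, err := io.Copy(io.Discard, r) // one backup stream per file
		if err != nil {
			return err
		}
		fmt.Printf("%s: declared size %d, streamed %d bytes\n", name, size, n)
	}
	return r.Close()
}
```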
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
index 4d22d0ecf..715e06e37 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -16,7 +18,7 @@ import (
// folder path at which the layer is stored.
func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
title := "hcsshim::GetLayerMountPath"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
@@ -27,7 +29,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
- return "", hcserror.New(err, title+" - failed", "(first call)")
+ return "", hcserror.New(err, title, "(first call)")
}
// Allocate a mount path of the returned length.
@@ -41,7 +43,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
- return "", hcserror.New(err, title+" - failed", "(second call)")
+ return "", hcserror.New(err, title, "(second call)")
}
mountPath := syscall.UTF16ToString(mountPathp[0:])
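
GetLayerMountPath above retains the classic Win32 two-call shape: the first call passes a nil buffer to learn the required length, the caller allocates, and the second call fills the buffer. A generic sketch of that shape follows, with fn standing in for a binding such as getLayerMountPath (the callback signature is assumed for illustration):

```go
//go:build windows

package twocall

import "syscall"

// queryString wraps a length-then-fill Win32 binding. The length is counted
// in UTF-16 code units, as with getLayerMountPath's mountPathLength.
func queryString(fn func(length *uintptr, buf *uint16) error) (string, error) {
	var length uintptr
	if err := fn(&length, nil); err != nil { // first call: length only
		return "", err
	}
	if length == 0 {
		return "", nil
	}
	buf := make([]uint16, length)
	if err := fn(&length, &buf[0]); err != nil { // second call: fill buffer
		return "", err
	}
	return syscall.UTF16ToString(buf), nil
}
```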
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
index bcc8fbd42..5e400fb20 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -14,14 +16,14 @@ import (
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
title := "hcsshim::GetSharedBaseImages"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
- return "", hcserror.New(err, title+" - failed", "")
+ return "", hcserror.New(err, title, "")
}
imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
span.AddAttributes(trace.StringAttribute("imageData", imageData))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
index 3eaca2780..20217ed81 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -11,7 +13,7 @@ import (
// GrantVmAccess adds access to a file for a given VM
func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) {
title := "hcsshim::GrantVmAccess"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -20,7 +22,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error
err = grantVmAccess(vmid, filepath)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
index b3c150d66..50f669a26 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
@@ -1,8 +1,9 @@
+//go:build windows
+
package wclayer
import (
"context"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -20,7 +21,7 @@ import (
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ImportLayer"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -36,7 +37,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare
err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
@@ -124,7 +125,7 @@ func (r *legacyLayerWriterWrapper) Close() (err error) {
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) {
- ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter")
+ ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerWriter")
defer func() {
if err != nil {
oc.SetSpanStatus(span, err)
@@ -148,7 +149,7 @@ func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string)
}, nil
}
- importPath, err := ioutil.TempDir("", "hcs")
+ importPath, err := os.MkdirTemp("", "hcs")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
index c6999973c..4d82977ea 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,7 +14,7 @@ import (
// to the system.
func LayerExists(ctx context.Context, path string) (_ bool, err error) {
title := "hcsshim::LayerExists"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
@@ -21,7 +23,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) {
var exists uint32
err = layerExists(&stdDriverInfo, path, &exists)
if err != nil {
- return false, hcserror.New(err, title+" - failed", "")
+ return false, hcserror.New(err, title, "")
}
span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
return exists != 0, nil
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
index 0ce34a30f..d4805f144 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,7 +14,7 @@ import (
// LayerID returns the layer ID of a layer on disk.
func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) {
title := "hcsshim::LayerID"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
index 1ec893c6a..ee17dd3d1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
// This file contains utility functions to support storage (graph) related
@@ -5,13 +7,19 @@ package wclayer
import (
"context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/sirupsen/logrus"
)
-/* To pass into syscall, we need a struct matching the following:
+/*
+To pass into syscall, we need a struct matching the following:
+
enum GraphDriverType
{
DiffDriver,
@@ -34,32 +42,34 @@ var (
stdDriverInfo = driverInfo{1, &utf16EmptyString}
)
-/* To pass into syscall, we need a struct matching the following:
+/*
+To pass into syscall, we need a struct matching the following:
+
typedef struct _WC_LAYER_DESCRIPTOR {
- //
- // The ID of the layer
- //
+ //
+ // The ID of the layer
+ //
- GUID LayerId;
+ GUID LayerId;
- //
- // Additional flags
- //
+ //
+ // Additional flags
+ //
- union {
- struct {
- ULONG Reserved : 31;
- ULONG Dirty : 1; // Created from sandbox as a result of snapshot
- };
- ULONG Value;
- } Flags;
+ union {
+ struct {
+ ULONG Reserved : 31;
+ ULONG Dirty : 1; // Created from sandbox as a result of snapshot
+ };
+ ULONG Value;
+ } Flags;
- //
- // Path to the layer root directory, null-terminated
- //
+ //
+ // Path to the layer root directory, null-terminated
+ //
- PCWSTR Path;
+ PCWSTR Path;
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
*/
@@ -95,3 +105,23 @@ func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]
return layers, nil
}
+
+// GetLayerUvmBuild reads the file at `layerPath\uvmbuildversion` and returns
+// the UVM build number stored in it.
+func GetLayerUvmBuild(layerPath string) (uint16, error) {
+ data, err := os.ReadFile(filepath.Join(layerPath, UvmBuildFileName))
+ if err != nil {
+ return 0, err
+ }
+ ver, err := strconv.ParseUint(string(data), 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(ver), nil
+}
+
+// WriteLayerUvmBuildFile writes a file at path `layerPath\uvmbuildversion` that contains the given `build`
+// version for future reference.
+func WriteLayerUvmBuildFile(layerPath string, build uint16) error {
+ return os.WriteFile(filepath.Join(layerPath, UvmBuildFileName), []byte(fmt.Sprintf("%d", build)), 0777)
+}
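
The two helpers above give a write/read round trip for the `uvmbuildversion` marker file. A hedged usage sketch, self-contained rather than importing the internal wclayer package (the build number 20348 is an arbitrary example):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"strconv"
    )

    const uvmBuildFileName = "uvmbuildversion" // mirrors wclayer.UvmBuildFileName

    func writeLayerUvmBuildFile(layerPath string, build uint16) error {
    	return os.WriteFile(filepath.Join(layerPath, uvmBuildFileName),
    		[]byte(strconv.Itoa(int(build))), 0o777)
    }

    func getLayerUvmBuild(layerPath string) (uint16, error) {
    	data, err := os.ReadFile(filepath.Join(layerPath, uvmBuildFileName))
    	if err != nil {
    		return 0, err
    	}
    	// ParseUint with bitSize 16 rejects anything that overflows uint16.
    	ver, err := strconv.ParseUint(string(data), 10, 16)
    	if err != nil {
    		return 0, err
    	}
    	return uint16(ver), nil
    }

    func main() {
    	dir, _ := os.MkdirTemp("", "layer")
    	defer os.RemoveAll(dir)
    	if err := writeLayerUvmBuildFile(dir, 20348); err != nil {
    		panic(err)
    	}
    	build, err := getLayerUvmBuild(dir)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("uvm build:", build) // uvm build: 20348
    }
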
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
index 83ba72cfa..fc12eeba4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -6,7 +8,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -28,10 +29,19 @@ var mutatedUtilityVMFiles = map[string]bool{
}
const (
- filesPath = `Files`
- hivesPath = `Hives`
- utilityVMPath = `UtilityVM`
- utilityVMFilesPath = `UtilityVM\Files`
+ filesPath = `Files`
+ HivesPath = `Hives`
+ UtilityVMPath = `UtilityVM`
+ UtilityVMFilesPath = `UtilityVM\Files`
+ RegFilesPath = `Files\Windows\System32\config`
+ BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD`
+ BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi`
+ ContainerBaseVhd = `blank-base.vhdx`
+ ContainerScratchVhd = `blank.vhdx`
+ UtilityVMBaseVhd = `SystemTemplateBase.vhdx`
+ UtilityVMScratchVhd = `SystemTemplate.vhdx`
+ LayoutFileName = `layout`
+ UvmBuildFileName = `uvmbuildversion`
)
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
@@ -76,7 +86,7 @@ func readTombstones(path string) (map[string]([]string), error) {
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
- return nil, errors.New("Invalid tombstones file")
+ return nil, errors.New("invalid tombstones file")
}
ts := make(map[string]([]string))
@@ -144,7 +154,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error {
}
return nil
})
- if err == errorIterationCanceled {
+ if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
return nil
}
if err == nil {
@@ -186,7 +196,7 @@ func findBackupStreamSize(r io.Reader) (int64, error) {
for {
hdr, err := br.Next()
if err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
err = nil
}
return 0, err
@@ -242,11 +252,11 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
if !hasPathPrefix(path, filesPath) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
- if path == hivesPath || path == filesPath {
+ if path == HivesPath || path == filesPath {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
- g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
+ g, err = os.Open(filepath.Join(r.root, HivesPath, `System_Delta`))
if err != nil {
return
}
@@ -262,7 +272,6 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
// The creation time and access time get reset for files outside of the Files path.
fileInfo.CreationTime = fileInfo.LastWriteTime
fileInfo.LastAccessTime = fileInfo.LastWriteTime
-
} else {
// The file attributes are written before the backup stream.
var attr uint32
@@ -294,6 +303,18 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
return
}
+func (r *legacyLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) {
+ fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile)
+ if err != nil {
+ return 0, nil, err
+ }
+ fileIDInfo, err := winio.GetFileID(r.currentFile)
+ if err != nil {
+ return 0, nil, err
+ }
+ return fileStandardInfo.NumberOfLinks, fileIDInfo, nil
+}
+
func (r *legacyLayerReader) Read(b []byte) (int, error) {
if r.backupReader == nil {
if r.currentFile == nil {
@@ -349,7 +370,7 @@ type legacyLayerWriter struct {
currentIsDir bool
}
-// newLegacyLayerWriter returns a LayerWriter that can write the contaler layer
+// newLegacyLayerWriter returns a LayerWriter that can write the container layer
// transport format to disk.
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
w = &legacyLayerWriter{
@@ -376,7 +397,7 @@ func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w
}
w.parentRoots = append(w.parentRoots, f)
}
- w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536)
+ w.bufWriter = bufio.NewWriterSize(io.Discard, 65536)
return
}
@@ -397,7 +418,7 @@ func (w *legacyLayerWriter) CloseRoots() {
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
- err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
+ err := safefile.MkdirRelative(UtilityVMPath, w.destRoot)
if err != nil {
return err
}
@@ -405,9 +426,9 @@ func (w *legacyLayerWriter) initUtilityVM() error {
// clone the utility VM from the parent layer into this layer. Use hard
// links to avoid unnecessary copying, since most of the files are
// immutable.
- err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles)
+ err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles)
if err != nil {
- return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
+ return fmt.Errorf("cloning the parent utility VM image failed: %w", err)
}
w.HasUtilityVM = true
}
@@ -419,7 +440,7 @@ func (w *legacyLayerWriter) reset() error {
if err != nil {
return err
}
- w.bufWriter.Reset(ioutil.Discard)
+ w.bufWriter.Reset(io.Discard)
if w.currentIsDir {
r := w.currentFile
br := winio.NewBackupStreamReader(r)
@@ -430,7 +451,7 @@ func (w *legacyLayerWriter) reset() error {
for {
bhdr, err := br.Next()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
// end of backupstream data
break
}
@@ -580,7 +601,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
return err
}
- if name == utilityVMPath {
+ if name == UtilityVMPath {
return w.initUtilityVM()
}
@@ -589,11 +610,11 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
name = filepath.Clean(name)
- if hasPathPrefix(name, utilityVMPath) {
+ if hasPathPrefix(name, UtilityVMPath) {
if !w.HasUtilityVM {
return errors.New("missing UtilityVM directory")
}
- if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
+ if !hasPathPrefix(name, UtilityVMFilesPath) && name != UtilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
createDisposition := uint32(winapi.FILE_OPEN)
@@ -687,7 +708,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
return err
}
- if hasPathPrefix(name, hivesPath) {
+ if hasPathPrefix(name, HivesPath) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
w.bufWriter.Reset(w.backupWriter)
} else {
@@ -695,7 +716,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
// The file attributes are written before the stream.
err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
- w.bufWriter.Reset(ioutil.Discard)
+ w.bufWriter.Reset(io.Discard)
return err
}
}
@@ -719,18 +740,18 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
// Look for cross-layer hard link targets in the parent layers, since
// nothing is in the destination path yet.
roots = w.parentRoots
- } else if hasPathPrefix(target, utilityVMFilesPath) {
+ } else if hasPathPrefix(target, UtilityVMFilesPath) {
// Since the utility VM is fully cloned into the destination path
// already, look for cross-layer hard link targets directly in the
// destination path.
roots = []*os.File{w.destRoot}
}
- if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
+ if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, UtilityVMFilesPath)) {
return errors.New("invalid hard link in layer")
}
- // Find to try the target of the link in a previously added file. If that
+ // Try to find the target of the link in a previously added file. If that
// fails, search in parent layers.
var selectedRoot *os.File
if _, ok := w.addedFiles[target]; ok {
@@ -765,7 +786,7 @@ func (w *legacyLayerWriter) Remove(name string) error {
name = filepath.Clean(name)
if hasPathPrefix(name, filesPath) {
w.Tombstones = append(w.Tombstones, name)
- } else if hasPathPrefix(name, utilityVMFilesPath) {
+ } else if hasPathPrefix(name, UtilityVMFilesPath) {
err := w.initUtilityVM()
if err != nil {
return err
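
The LinkInfo method added to legacyLayerReader earlier in this file surfaces the hard-link count plus the volume-unique file ID, which is exactly what an exporter needs to emit a file once and the remaining paths as links. A sketch of that dedupe, assuming the same go-winio helpers (winio.GetFileStandardInfo, winio.GetFileID) that LinkInfo itself calls; Windows-only:

    //go:build windows

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/Microsoft/go-winio"
    )

    // fileKey identifies a file on a volume; two paths with equal keys are
    // hard links to the same underlying data.
    type fileKey struct {
    	volume uint64
    	id     [16]byte
    }

    func keyOf(f *os.File) (fileKey, uint32, error) {
    	std, err := winio.GetFileStandardInfo(f)
    	if err != nil {
    		return fileKey{}, 0, err
    	}
    	idInfo, err := winio.GetFileID(f)
    	if err != nil {
    		return fileKey{}, 0, err
    	}
    	return fileKey{idInfo.VolumeSerialNumber, idInfo.FileID}, std.NumberOfLinks, nil
    }

    func main() {
    	seen := map[fileKey]string{}
    	for _, path := range os.Args[1:] {
    		f, err := os.Open(path)
    		if err != nil {
    			panic(err)
    		}
    		key, links, err := keyOf(f)
    		f.Close()
    		if err != nil {
    			panic(err)
    		}
    		if first, ok := seen[key]; ok {
    			fmt.Printf("%s is a hard link of %s\n", path, first)
    			continue
    		}
    		seen[key] = path
    		fmt.Printf("%s: %d link(s)\n", path, links)
    	}
    }
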
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
index bcf39c6b8..c45fa2750 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -14,15 +16,15 @@ import (
// across all clients.
func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
title := "hcsshim::NameToGuid"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
- span.AddAttributes(trace.StringAttribute("name", name))
+ span.AddAttributes(trace.StringAttribute("objectName", name))
var id guid.GUID
err = nameToGuid(name, &id)
if err != nil {
- return guid.GUID{}, hcserror.New(err, title+" - failed", "")
+ return guid.GUID{}, hcserror.New(err, title, "")
}
span.AddAttributes(trace.StringAttribute("guid", id.String()))
return id, nil
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
index 55f7730d0..b66e07124 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -19,7 +21,7 @@ var prepareLayerLock sync.Mutex
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::PrepareLayer"
- ctx, span := trace.StartSpan(ctx, title)
+ ctx, span := oc.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -38,7 +40,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (
defer prepareLayerLock.Unlock()
err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
index 30bcdff5f..7c49cbda4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,7 +14,7 @@ import (
// The files should have been extracted to \Files.
func ProcessBaseLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::ProcessBaseLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
@@ -28,7 +30,7 @@ func ProcessBaseLayer(ctx context.Context, path string) (err error) {
// The files should have been extracted to \Files.
func ProcessUtilityVMImage(ctx context.Context, path string) (err error) {
title := "hcsshim::ProcessUtilityVMImage"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
index 79fb98678..fe20702c1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package wclayer
import (
@@ -12,14 +14,14 @@ import (
// the given id.
func UnprepareLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::UnprepareLayer"
- ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
err = unprepareLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
index 9b1e06d50..39682b817 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
@@ -1,11 +1,10 @@
-// Package wclayer provides bindings to HCS's legacy layer management API and
-// provides a higher level interface around these calls for container layer
-// management.
+//go:build windows
+
package wclayer
import "github.com/Microsoft/go-winio/pkg/guid"
-//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go wclayer.go
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
index 67f917f07..403b94fc5 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package wclayer
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,44 +29,83 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
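
The regenerated errnoErr no longer returns nil for errno 0: the helper is only reached on failure paths, so when an API reports failure through its return value while GetLastError is 0, the stub must still surface a non-nil error, and mkwinsyscall now boxes that case as EINVAL. A trimmed illustration of the contract (plain syscall.Errno values, not tied to any real DLL call):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    )

    const errnoERROR_IO_PENDING = 997

    var (
    	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
    	errERROR_EINVAL     error = syscall.EINVAL
    )

    // errnoErr mirrors the generated helper: callers invoke it only after a
    // failed call, so errno 0 must not turn into a nil error.
    func errnoErr(e syscall.Errno) error {
    	switch e {
    	case 0:
    		return errERROR_EINVAL // failure with no last-error code set
    	case errnoERROR_IO_PENDING:
    		return errERROR_IO_PENDING // reuse one boxed value, no per-call allocation
    	}
    	return e
    }

    func main() {
    	fmt.Println(errnoErr(0))   // invalid argument
    	fmt.Println(errnoErr(997)) // IO pending
    	fmt.Println(errnoErr(5))   // access denied
    }
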
var (
- modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
- modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll")
+ modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
+ procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW")
+ procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk")
+ procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
procActivateLayer = modvmcompute.NewProc("ActivateLayer")
procCopyLayer = modvmcompute.NewProc("CopyLayer")
procCreateLayer = modvmcompute.NewProc("CreateLayer")
procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer")
- procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize")
procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer")
procDestroyLayer = modvmcompute.NewProc("DestroyLayer")
+ procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize")
procExportLayer = modvmcompute.NewProc("ExportLayer")
- procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath")
procGetBaseImages = modvmcompute.NewProc("GetBaseImages")
+ procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath")
+ procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess")
procImportLayer = modvmcompute.NewProc("ImportLayer")
procLayerExists = modvmcompute.NewProc("LayerExists")
procNameToGuid = modvmcompute.NewProc("NameToGuid")
procPrepareLayer = modvmcompute.NewProc("PrepareLayer")
- procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer")
procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage")
procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage")
- procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess")
- procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
- procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk")
- procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW")
+ procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer")
)
+func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(directoryName)
+ if err != nil {
+ return
+ }
+ return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes)
+}
+
+func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) {
+ r1, _, e1 := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))
+ if r1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
+}
+
+func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+ if r1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func activateLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
@@ -74,10 +116,11 @@ func activateLayer(info *driverInfo, id string) (hr error) {
}
func _activateLayer(info *driverInfo, id *uint16) (hr error) {
- if hr = procActivateLayer.Find(); hr != nil {
+ hr = procActivateLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ r0, _, _ := syscall.SyscallN(procActivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -102,14 +145,15 @@ func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LA
}
func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ hr = procCopyLayer.Find()
+ if hr != nil {
+ return
+ }
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
- if hr = procCopyLayer.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ r0, _, _ := syscall.SyscallN(procCopyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -134,10 +178,11 @@ func createLayer(info *driverInfo, id string, parent string) (hr error) {
}
func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
- if hr = procCreateLayer.Find(); hr != nil {
+ hr = procCreateLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
+ r0, _, _ := syscall.SyscallN(procCreateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -157,14 +202,15 @@ func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors
}
func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ hr = procCreateSandboxLayer.Find()
+ if hr != nil {
+ return
+ }
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
- if hr = procCreateSandboxLayer.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
+ r0, _, _ := syscall.SyscallN(procCreateSandboxLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -174,20 +220,21 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto
return
}
-func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) {
+func deactivateLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
- return _expandSandboxSize(info, _p0, size)
+ return _deactivateLayer(info, _p0)
}
-func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
- if hr = procExpandSandboxSize.Find(); hr != nil {
+func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
+ hr = procDeactivateLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
+ r0, _, _ := syscall.SyscallN(procDeactivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -197,20 +244,21 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
return
}
-func deactivateLayer(info *driverInfo, id string) (hr error) {
+func destroyLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
- return _deactivateLayer(info, _p0)
+ return _destroyLayer(info, _p0)
}
-func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
- if hr = procDeactivateLayer.Find(); hr != nil {
+func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
+ hr = procDestroyLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ r0, _, _ := syscall.SyscallN(procDestroyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -220,20 +268,21 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
return
}
-func destroyLayer(info *driverInfo, id string) (hr error) {
+func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
- return _destroyLayer(info, _p0)
+ return _expandSandboxSize(info, _p0, size)
}
-func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
- if hr = procDestroyLayer.Find(); hr != nil {
+func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
+ hr = procExpandSandboxSize.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ r0, _, _ := syscall.SyscallN(procExpandSandboxSize.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -258,14 +307,30 @@ func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYE
}
func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ hr = procExportLayer.Find()
+ if hr != nil {
+ return
+ }
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
- if hr = procExportLayer.Find(); hr != nil {
+ r0, _, _ := syscall.SyscallN(procExportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func getBaseImages(buffer **uint16) (hr error) {
+ hr = procGetBaseImages.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ r0, _, _ := syscall.SyscallN(procGetBaseImages.Addr(), uintptr(unsafe.Pointer(buffer)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -285,10 +350,11 @@ func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uin
}
func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) {
- if hr = procGetLayerMountPath.Find(); hr != nil {
+ hr = procGetLayerMountPath.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLayerMountPath.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -298,11 +364,26 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u
return
}
-func getBaseImages(buffer **uint16) (hr error) {
- if hr = procGetBaseImages.Find(); hr != nil {
+func grantVmAccess(vmid string, filepath string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(vmid)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(filepath)
+ if hr != nil {
+ return
+ }
+ return _grantVmAccess(_p0, _p1)
+}
+
+func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
+ hr = procGrantVmAccess.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGrantVmAccess.Addr(), uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -327,14 +408,15 @@ func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYE
}
func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ hr = procImportLayer.Find()
+ if hr != nil {
+ return
+ }
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
- if hr = procImportLayer.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ r0, _, _ := syscall.SyscallN(procImportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -354,10 +436,11 @@ func layerExists(info *driverInfo, id string, exists *uint32) (hr error) {
}
func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
- if hr = procLayerExists.Find(); hr != nil {
+ hr = procLayerExists.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
+ r0, _, _ := syscall.SyscallN(procLayerExists.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -377,10 +460,11 @@ func nameToGuid(name string, guid *_guid) (hr error) {
}
func _nameToGuid(name *uint16, guid *_guid) (hr error) {
- if hr = procNameToGuid.Find(); hr != nil {
+ hr = procNameToGuid.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
+ r0, _, _ := syscall.SyscallN(procNameToGuid.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -400,37 +484,15 @@ func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR
}
func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
- var _p1 *WC_LAYER_DESCRIPTOR
- if len(descriptors) > 0 {
- _p1 = &descriptors[0]
- }
- if hr = procPrepareLayer.Find(); hr != nil {
- return
- }
- r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
- if int32(r0) < 0 {
- if r0&0x1fff0000 == 0x00070000 {
- r0 &= 0xffff
- }
- hr = syscall.Errno(r0)
- }
- return
-}
-
-func unprepareLayer(info *driverInfo, id string) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(id)
+ hr = procPrepareLayer.Find()
if hr != nil {
return
}
- return _unprepareLayer(info, _p0)
-}
-
-func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
- if hr = procUnprepareLayer.Find(); hr != nil {
- return
+ var _p1 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p1 = &descriptors[0]
}
- r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ r0, _, _ := syscall.SyscallN(procPrepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -450,10 +512,11 @@ func processBaseImage(path string) (hr error) {
}
func _processBaseImage(path *uint16) (hr error) {
- if hr = procProcessBaseImage.Find(); hr != nil {
+ hr = procProcessBaseImage.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procProcessBaseImage.Addr(), uintptr(unsafe.Pointer(path)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -473,10 +536,11 @@ func processUtilityImage(path string) (hr error) {
}
func _processUtilityImage(path *uint16) (hr error) {
- if hr = procProcessUtilityImage.Find(); hr != nil {
+ hr = procProcessUtilityImage.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procProcessUtilityImage.Addr(), uintptr(unsafe.Pointer(path)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -486,25 +550,21 @@ func _processUtilityImage(path *uint16) (hr error) {
return
}
-func grantVmAccess(vmid string, filepath string) (hr error) {
+func unprepareLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(vmid)
- if hr != nil {
- return
- }
- var _p1 *uint16
- _p1, hr = syscall.UTF16PtrFromString(filepath)
+ _p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
- return _grantVmAccess(_p0, _p1)
+ return _unprepareLayer(info, _p0)
}
-func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
- if hr = procGrantVmAccess.Find(); hr != nil {
+func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
+ hr = procUnprepareLayer.Find()
+ if hr != nil {
return
}
- r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
+ r0, _, _ := syscall.SyscallN(procUnprepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -513,57 +573,3 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
}
return
}
-
-func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(path)
- if err != nil {
- return
- }
- return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
-}
-
-func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
- r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
- if r1 != 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))
- if r1 != 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
- var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(directoryName)
- if err != nil {
- return
- }
- return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes)
-}
-
-func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
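
Most of this regenerated file is the mechanical Syscall/Syscall6 → SyscallN migration: the variadic form drops the hand-counted argument count and the zero padding, both classic sources of copy-paste bugs. A before/after sketch against a real kernel32 export (GetTickCount64 takes no arguments, so it is safe to demonstrate):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"

    	"golang.org/x/sys/windows"
    )

    var (
    	modkernel32        = windows.NewLazySystemDLL("kernel32.dll")
    	procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
    )

    func main() {
    	// Old style: the argument count and trailing zeros had to match by hand.
    	r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)

    	// New style: SyscallN takes exactly the arguments you pass.
    	r1, _, _ := syscall.SyscallN(procGetTickCount64.Addr())

    	fmt.Printf("uptime ms (Syscall): %d, (SyscallN): %d\n", r0, r1)
    }
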
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go
new file mode 100644
index 000000000..559d44325
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go
@@ -0,0 +1,19 @@
+package winapi
+
+const (
+ BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001
+ BINDFLT_FLAG_MERGED_BIND_MAPPING uint32 = 0x00000002
+ BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING uint32 = 0x00000004
+)
+
+// HRESULT
+// BfSetupFilter(
+// _In_opt_ HANDLE JobHandle,
+// _In_ ULONG Flags,
+// _In_ LPCWSTR VirtualizationRootPath,
+// _In_ LPCWSTR VirtualizationTargetPath,
+// _In_reads_opt_( VirtualizationExceptionPathCount ) LPCWSTR* VirtualizationExceptionPaths,
+// _In_opt_ ULONG VirtualizationExceptionPathCount
+// );
+//
+//sys BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter?
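
The trailing `?` on the //sys line tells mkwinsyscall to resolve the export at call time and return the lookup error instead of panicking when bindfltapi.dll is absent, since the bind filter is not present on all Windows SKUs. A hand-reduced sketch of the availability probe the generated stub performs (not the actual generated file):

    //go:build windows

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    var (
    	modbindfltapi     = windows.NewLazySystemDLL("bindfltapi.dll")
    	procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter")
    )

    func main() {
    	// With the trailing '?', the generated stub calls Find() before
    	// dispatching and returns its error; without '?', a missing export
    	// would panic at call time instead.
    	if err := procBfSetupFilter.Find(); err != nil {
    		fmt.Println("bind filter unavailable:", err)
    		return
    	}
    	fmt.Println("BfSetupFilter is available")
    }
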
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go
new file mode 100644
index 000000000..21664577b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go
@@ -0,0 +1,47 @@
+//go:build windows
+
+package winapi
+
+import (
+ "unsafe"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "golang.org/x/sys/windows"
+)
+
+type g = guid.GUID
+type FsHandle uintptr
+type StreamHandle uintptr
+
+type CimFsFileMetadata struct {
+ Attributes uint32
+ FileSize int64
+
+ CreationTime windows.Filetime
+ LastWriteTime windows.Filetime
+ ChangeTime windows.Filetime
+ LastAccessTime windows.Filetime
+
+ SecurityDescriptorBuffer unsafe.Pointer
+ SecurityDescriptorSize uint32
+
+ ReparseDataBuffer unsafe.Pointer
+ ReparseDataSize uint32
+
+ ExtendedAttributes unsafe.Pointer
+ EACount uint32
+}
+
+//sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage?
+//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage?
+
+//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
+//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage?
+//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?
+
+//sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile?
+//sys CimCloseStream(cimStreamHandle StreamHandle) (hr error) = cimfs.CimCloseStream?
+//sys CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) = cimfs.CimWriteStream?
+//sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath?
+//sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink?
+//sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream?
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
new file mode 100644
index 000000000..4547cdd8e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
@@ -0,0 +1,46 @@
+//go:build windows
+
+package winapi
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+	// We need this wrapper because the API takes a COORD struct by value rather than a pointer, so we reinterpret the 4-byte COORD as a uint32 before the call.
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+	// We need this wrapper because the API takes a COORD struct by value rather than a pointer, so we reinterpret the 4-byte COORD as a uint32 before the call.
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+// _In_ COORD size,
+// _In_ HANDLE hInput,
+// _In_ HANDLE hOutput,
+// _In_ DWORD dwFlags,
+// _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+// _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+// _In_ HPCON hPC ,
+// _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
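
CreatePseudoConsole takes its COORD by value, and since COORD is two int16s (4 bytes) the wrapper above reinterprets it as a single uint32 argument; note the wrapper also forwards a literal 0 where dwFlags was accepted, matching the upstream source. A tiny demonstration of the cast, relying on little-endian layout, which holds on all Windows targets:

    //go:build windows

    package main

    import (
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	size := windows.Coord{X: 80, Y: 25}

    	// COORD is two int16s, exactly 4 bytes, so it can be smuggled
    	// through a register-sized uint32 argument unchanged.
    	packed := *(*uint32)(unsafe.Pointer(&size))

    	fmt.Printf("packed: %#08x (X in low word, Y in high word)\n", packed)
    	fmt.Println("X:", int16(packed&0xffff), "Y:", int16(packed>>16))
    }
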
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go
index df28ea242..7875466ca 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winapi
import "github.com/Microsoft/go-winio/pkg/guid"
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go
new file mode 100644
index 000000000..9acc0bfc1
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go
@@ -0,0 +1,3 @@
+// Package winapi contains various low-level bindings to Windows APIs. It can
+// be thought of as an extension to golang.org/x/sys/windows.
+package winapi
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go
new file mode 100644
index 000000000..40cbf8712
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go
@@ -0,0 +1,11 @@
+//go:build windows
+
+package winapi
+
+import (
+ "golang.org/x/sys/windows"
+)
+
+func IsElevated() bool {
+ return windows.GetCurrentProcessToken().IsElevated()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go
index 4e80ef68c..49ce924cb 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winapi
import "syscall"
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go
index 7ce52afd5..3dcb3faa0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go
@@ -1,5 +1,8 @@
+//go:build windows
+
package winapi
+//sys CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) = kernel32.CopyFileW
//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
@@ -34,34 +37,35 @@ const (
// Select entries from FILE_INFO_BY_HANDLE_CLASS.
//
// C declaration:
-// typedef enum _FILE_INFO_BY_HANDLE_CLASS {
-// FileBasicInfo,
-// FileStandardInfo,
-// FileNameInfo,
-// FileRenameInfo,
-// FileDispositionInfo,
-// FileAllocationInfo,
-// FileEndOfFileInfo,
-// FileStreamInfo,
-// FileCompressionInfo,
-// FileAttributeTagInfo,
-// FileIdBothDirectoryInfo,
-// FileIdBothDirectoryRestartInfo,
-// FileIoPriorityHintInfo,
-// FileRemoteProtocolInfo,
-// FileFullDirectoryInfo,
-// FileFullDirectoryRestartInfo,
-// FileStorageInfo,
-// FileAlignmentInfo,
-// FileIdInfo,
-// FileIdExtdDirectoryInfo,
-// FileIdExtdDirectoryRestartInfo,
-// FileDispositionInfoEx,
-// FileRenameInfoEx,
-// FileCaseSensitiveInfo,
-// FileNormalizedNameInfo,
-// MaximumFileInfoByHandleClass
-// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS;
+//
+// typedef enum _FILE_INFO_BY_HANDLE_CLASS {
+// FileBasicInfo,
+// FileStandardInfo,
+// FileNameInfo,
+// FileRenameInfo,
+// FileDispositionInfo,
+// FileAllocationInfo,
+// FileEndOfFileInfo,
+// FileStreamInfo,
+// FileCompressionInfo,
+// FileAttributeTagInfo,
+// FileIdBothDirectoryInfo,
+// FileIdBothDirectoryRestartInfo,
+// FileIoPriorityHintInfo,
+// FileRemoteProtocolInfo,
+// FileFullDirectoryInfo,
+// FileFullDirectoryRestartInfo,
+// FileStorageInfo,
+// FileAlignmentInfo,
+// FileIdInfo,
+// FileIdExtdDirectoryInfo,
+// FileIdExtdDirectoryRestartInfo,
+// FileDispositionInfoEx,
+// FileRenameInfoEx,
+// FileCaseSensitiveInfo,
+// FileNormalizedNameInfo,
+// MaximumFileInfoByHandleClass
+// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS;
//
// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class
const (
@@ -98,10 +102,11 @@ type FileLinkInformation struct {
}
// C declaration:
-// typedef struct _FILE_ID_INFO {
-// ULONGLONG VolumeSerialNumber;
-// FILE_ID_128 FileId;
-// } FILE_ID_INFO, *PFILE_ID_INFO;
+//
+// typedef struct _FILE_ID_INFO {
+// ULONGLONG VolumeSerialNumber;
+// FILE_ID_128 FileId;
+// } FILE_ID_INFO, *PFILE_ID_INFO;
//
// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info
type FILE_ID_INFO struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
deleted file mode 100644
index 4e609cbf1..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winapi
-
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
index ba12b1ad9..4c04dd3f8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winapi
import (
@@ -24,7 +26,10 @@ const (
// Access rights for creating or opening job objects.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+ JOB_OBJECT_QUERY = 0x0004
+ JOB_OBJECT_ALL_ACCESS = 0x1F003F
+)
// IO limit flags
//
@@ -52,6 +57,8 @@ const (
JobObjectLimitViolationInformation uint32 = 13
JobObjectMemoryUsageInformation uint32 = 28
JobObjectNotificationLimitInformation2 uint32 = 33
+ JobObjectCreateSilo uint32 = 35
+ JobObjectSiloBasicInformation uint32 = 36
JobObjectIoAttribution uint32 = 42
)
@@ -93,7 +100,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
// AllPids returns all the process Ids in the job object.
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
- return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
+ return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
}
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
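
The AllPids change above adds the capacity index to the slice expression: the returned slice now has cap == len, so a later append reallocates instead of writing into the struct memory that backs the array. A plain-Go demonstration of the difference:

    package main

    import "fmt"

    func main() {
    	backing := [8]int{1, 2, 3, 99, 99, 99, 99, 99}
    	n := 3

    	twoIndex := backing[:n]     // len 3, cap 8: append reuses backing
    	threeIndex := backing[:n:n] // len 3, cap 3: append must reallocate

    	_ = append(twoIndex, 42)  // overwrites backing[3]
    	_ = append(threeIndex, 7) // copies; backing untouched beyond n

    	fmt.Println(backing) // [1 2 3 42 99 99 99 99] — only the 2-index append leaked
    }
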
@@ -108,29 +115,27 @@ type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct {
TotalTerminateProcesses uint32
}
-//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information
+// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information
type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct {
BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION
IoInfo windows.IO_COUNTERS
}
-// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION {
-// ULONG64 JobMemory;
-// ULONG64 PeakJobMemoryUsed;
-// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION;
-//
+// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION {
+// ULONG64 JobMemory;
+// ULONG64 PeakJobMemoryUsed;
+// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION;
type JOBOBJECT_MEMORY_USAGE_INFORMATION struct {
JobMemory uint64
PeakJobMemoryUsed uint64
}
-// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS {
-// ULONG_PTR IoCount;
-// ULONGLONG TotalNonOverlappedQueueTime;
-// ULONGLONG TotalNonOverlappedServiceTime;
-// ULONGLONG TotalSize;
-// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS;
-//
+// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS {
+// ULONG_PTR IoCount;
+// ULONGLONG TotalNonOverlappedQueueTime;
+// ULONGLONG TotalNonOverlappedServiceTime;
+// ULONGLONG TotalSize;
+// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS;
type JOBOBJECT_IO_ATTRIBUTION_STATS struct {
IoCount uintptr
TotalNonOverlappedQueueTime uint64
@@ -138,12 +143,11 @@ type JOBOBJECT_IO_ATTRIBUTION_STATS struct {
TotalSize uint64
}
-// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION {
-// ULONG ControlFlags;
-// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats;
-// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats;
-// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION;
-//
+// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION {
+// ULONG ControlFlags;
+// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats;
+// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats;
+// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION;
type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct {
ControlFlags uint32
ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS
@@ -156,13 +160,28 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
CompletionPort windows.Handle
}
+// typedef struct _SILOOBJECT_BASIC_INFORMATION {
+// DWORD SiloId;
+// DWORD SiloParentId;
+// DWORD NumberOfProcesses;
+// BOOLEAN IsInServerSilo;
+// BYTE Reserved[3];
+// } SILOOBJECT_BASIC_INFORMATION, *PSILOOBJECT_BASIC_INFORMATION;
+type SILOOBJECT_BASIC_INFORMATION struct {
+ SiloID uint32
+ SiloParentID uint32
+ NumberOfProcesses uint32
+ IsInServerSilo bool
+ Reserved [3]uint8
+}
+
// BOOL IsProcessInJob(
// HANDLE ProcessHandle,
// HANDLE JobHandle,
// PBOOL Result
// );
//
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
// BOOL QueryInformationJobObject(
// HANDLE hJob,
@@ -172,7 +191,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,
@@ -195,6 +214,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks,
// ULONG *InfoBlockCount
// );
+//
//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject
// NTSTATUS
@@ -203,6 +223,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// _In_ ACCESS_MASK DesiredAccess,
// _In_ POBJECT_ATTRIBUTES ObjectAttributes
// );
+//
//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject
// NTSTATUS
@@ -212,4 +233,5 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// _In_ ACCESS_MASK DesiredAccess,
// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes
// );
+//
//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject
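
The jobobject.go changes above tighten the bindings in three ways: QueryInformationJobObject now takes an unsafe.Pointer instead of a uintptr (keeping the referent visible to the Go garbage collector for the duration of the call), IsProcessInJob takes a *int32 to match the Win32 BOOL, and AllPids uses a full slice expression so the returned slice's capacity cannot exceed its length. Below is a minimal sketch of the updated query path; the info-class value 8 (JobObjectBasicAndIoAccountingInformation) is taken from the Win32 documentation, since the corresponding constant falls outside the hunk shown here.

package winapiexample

import (
	"fmt"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// jobObjectBasicAndIoAccountingInformation is the documented info class for
// JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION queries (winnt.h value 8).
const jobObjectBasicAndIoAccountingInformation uint32 = 8

// queryJobAccounting reads the basic and I/O accounting counters for a job
// object whose handle came from, e.g., OpenJobObject.
func queryJobAccounting(job windows.Handle) (*winapi.JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION, error) {
	var info winapi.JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION
	if err := winapi.QueryInformationJobObject(
		job,
		jobObjectBasicAndIoAccountingInformation,
		unsafe.Pointer(&info), // stays GC-visible, unlike the old uintptr form
		uint32(unsafe.Sizeof(info)),
		nil, // return length not needed for a fixed-size struct
	); err != nil {
		return nil, fmt.Errorf("QueryInformationJobObject: %w", err)
	}
	return &info, nil
}
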
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
index 83f704064..53f62948c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
@@ -1,27 +1,4 @@
package winapi
-// VOID RtlMoveMemory(
-// _Out_ VOID UNALIGNED *Destination,
-// _In_ const VOID UNALIGNED *Source,
-// _In_ SIZE_T Length
-// );
-//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory
-
//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys LocalFree(ptr uintptr) = kernel32.LocalFree
-
-// BOOL QueryWorkingSet(
-// HANDLE hProcess,
-// PVOID pv,
-// DWORD cb
-// );
-//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet
-
-type PSAPI_WORKING_SET_INFORMATION struct {
- NumberOfEntries uintptr
- WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK
-}
-
-type PSAPI_WORKING_SET_BLOCK struct {
- Flags uintptr
-}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go
new file mode 100644
index 000000000..c578b3d31
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go
@@ -0,0 +1,37 @@
+package winapi
+
+// Offline registry management API
+
+type ORHKey uintptr
+
+type RegType uint32
+
+const (
+ // Registry value types: https://docs.microsoft.com/en-us/windows/win32/sysinfo/registry-value-types
+ REG_TYPE_NONE RegType = 0
+ REG_TYPE_SZ RegType = 1
+ REG_TYPE_EXPAND_SZ RegType = 2
+ REG_TYPE_BINARY RegType = 3
+ REG_TYPE_DWORD RegType = 4
+ REG_TYPE_DWORD_LITTLE_ENDIAN RegType = 4
+ REG_TYPE_DWORD_BIG_ENDIAN RegType = 5
+ REG_TYPE_LINK RegType = 6
+ REG_TYPE_MULTI_SZ RegType = 7
+ REG_TYPE_RESOURCE_LIST RegType = 8
+ REG_TYPE_FULL_RESOURCE_DESCRIPTOR RegType = 9
+ REG_TYPE_RESOURCE_REQUIREMENTS_LIST RegType = 10
+ REG_TYPE_QWORD RegType = 11
+ REG_TYPE_QWORD_LITTLE_ENDIAN RegType = 11
+)
+
+//sys ORCreateHive(key *ORHKey) (win32err error) = offreg.ORCreateHive
+//sys ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) = offreg.ORMergeHives
+//sys OROpenHive(hivePath string, result *ORHKey) (win32err error) = offreg.OROpenHive
+//sys ORCloseHive(handle ORHKey) (win32err error) = offreg.ORCloseHive
+//sys ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) = offreg.ORSaveHive
+//sys OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) = offreg.OROpenKey
+//sys ORCloseKey(handle ORHKey) (win32err error) = offreg.ORCloseKey
+//sys ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) = offreg.ORCreateKey
+//sys ORDeleteKey(handle ORHKey, subKey string) (win32err error) = offreg.ORDeleteKey
+//sys ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) = offreg.ORGetValue
+//sys ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) = offreg.ORSetValue
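
A hedged sketch of how these offreg.dll bindings compose to build a registry hive offline; the hive layout, value name, and the Windows 7 format version pair (6, 1) are illustrative only.

package winapiexample

import (
	"encoding/binary"

	"github.com/Microsoft/hcsshim/internal/winapi"
)

// buildHive creates an in-memory hive, writes one REG_DWORD value under
// Software\Example, and serializes the result to hivePath.
func buildHive(hivePath string) error {
	var hive winapi.ORHKey
	if err := winapi.ORCreateHive(&hive); err != nil {
		return err
	}
	defer winapi.ORCloseHive(hive)

	var key winapi.ORHKey
	// class, options, and security descriptor are left at their defaults.
	if err := winapi.ORCreateKey(hive, `Software\Example`, 0, 0, 0, &key, nil); err != nil {
		return err
	}
	defer winapi.ORCloseKey(key)

	// REG_DWORD payloads are stored little-endian.
	data := make([]byte, 4)
	binary.LittleEndian.PutUint32(data, 1)
	if err := winapi.ORSetValue(key, "Enabled", uint32(winapi.REG_TYPE_DWORD), &data[0], uint32(len(data))); err != nil {
		return err
	}
	return winapi.ORSaveHive(hive, hivePath, 6, 1)
}
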
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go
index 908920e87..c6a149b55 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go
@@ -8,4 +8,5 @@ package winapi
// LPWSTR lpBuffer,
// LPWSTR *lpFilePart
// );
+//
//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW
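
For reference, a small sketch of driving the SearchPath binding from Go, growing the buffer when the returned size indicates truncation (per the Win32 contract, the required length in characters is returned when the buffer is too small):

package winapiexample

import (
	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// findOnPath resolves file against the default search path (PATH et al.).
func findOnPath(file string) (string, error) {
	fileW, err := windows.UTF16PtrFromString(file)
	if err != nil {
		return "", err
	}
	buf := make([]uint16, windows.MAX_PATH)
	n, err := winapi.SearchPath(nil, fileW, nil, uint32(len(buf)), &buf[0], nil)
	if err != nil {
		return "", err
	}
	if n > uint32(len(buf)) {
		// Buffer was too small; n is the required size in characters.
		buf = make([]uint16, n)
		if n, err = winapi.SearchPath(nil, fileW, nil, uint32(len(buf)), &buf[0], nil); err != nil {
			return "", err
		}
	}
	return windows.UTF16ToString(buf[:n]), nil
}
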
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index b87068327..f4ae94cfa 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -2,9 +2,60 @@ package winapi
const PROCESS_ALL_ACCESS uint32 = 2097151
-// DWORD GetProcessImageFileNameW(
-// HANDLE hProcess,
-// LPWSTR lpImageFileName,
-// DWORD nSize
+const (
+ PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
+ PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
+)
+
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
+const ProcessVmCounters = 3
+
+// __kernel_entry NTSTATUS NtQueryInformationProcess(
+// [in] HANDLE ProcessHandle,
+// [in] PROCESSINFOCLASS ProcessInformationClass,
+// [out] PVOID ProcessInformation,
+// [in] ULONG ProcessInformationLength,
+// [out, optional] PULONG ReturnLength
// );
-//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
+//
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+
+// typedef struct _VM_COUNTERS_EX {
+// SIZE_T PeakVirtualSize;
+// SIZE_T VirtualSize;
+// ULONG PageFaultCount;
+// SIZE_T PeakWorkingSetSize;
+// SIZE_T WorkingSetSize;
+// SIZE_T QuotaPeakPagedPoolUsage;
+// SIZE_T QuotaPagedPoolUsage;
+// SIZE_T QuotaPeakNonPagedPoolUsage;
+// SIZE_T QuotaNonPagedPoolUsage;
+// SIZE_T PagefileUsage;
+// SIZE_T PeakPagefileUsage;
+// SIZE_T PrivateUsage;
+// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
+type VM_COUNTERS_EX struct {
+ PeakVirtualSize uintptr
+ VirtualSize uintptr
+ PageFaultCount uint32
+ PeakWorkingSetSize uintptr
+ WorkingSetSize uintptr
+ QuotaPeakPagedPoolUsage uintptr
+ QuotaPagedPoolUsage uintptr
+ QuotaPeakNonPagedPoolUsage uintptr
+ QuotaNonPagedPoolUsage uintptr
+ PagefileUsage uintptr
+ PeakPagefileUsage uintptr
+ PrivateUsage uintptr
+}
+
+// typedef struct _VM_COUNTERS_EX2 {
+// VM_COUNTERS_EX CountersEx;
+// SIZE_T PrivateWorkingSetSize;
+// SIZE_T SharedCommitUsage;
+// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
+type VM_COUNTERS_EX2 struct {
+ CountersEx VM_COUNTERS_EX
+ PrivateWorkingSetSize uintptr
+ SharedCommitUsage uintptr
+}
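
A minimal sketch of the new NtQueryInformationProcess binding with the ProcessVmCounters info class, which yields the extended VM counters (including the private working set) introduced above:

package winapiexample

import (
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// queryVMCounters reads the extended VM counters for a process. A zero
// NTSTATUS means success; anything else is translated to a Win32 error.
func queryVMCounters(proc windows.Handle) (*winapi.VM_COUNTERS_EX2, error) {
	var counters winapi.VM_COUNTERS_EX2
	status := winapi.NtQueryInformationProcess(
		proc,
		winapi.ProcessVmCounters,
		unsafe.Pointer(&counters),
		uint32(unsafe.Sizeof(counters)),
		nil,
	)
	if status != 0 {
		return nil, winapi.RtlNtStatusToDosError(status)
	}
	return &counters, nil
}
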
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
index 327f57d7c..cb494aaa6 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winapi
import "golang.org/x/sys/windows"
@@ -12,7 +14,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
-//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
+//
+//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
type SYSTEM_PROCESS_INFORMATION struct {
NextEntryOffset uint32 // ULONG
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go
index 4724713e3..f23141a83 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go
@@ -9,4 +9,5 @@ package winapi
// DWORD dwCreationFlags,
// LPDWORD lpThreadId
// );
+//
//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go
new file mode 100644
index 000000000..84d4cc294
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go
@@ -0,0 +1,194 @@
+//go:build windows
+
+package winapi
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+const UserNameCharLimit = 20
+
+const (
+ USER_PRIV_GUEST uint32 = iota
+ USER_PRIV_USER
+ USER_PRIV_ADMIN
+)
+
+const (
+ UF_NORMAL_ACCOUNT = 0x00200
+ UF_DONT_EXPIRE_PASSWD = 0x10000
+)
+
+const NERR_UserNotFound = syscall.Errno(0x8AD)
+
+// typedef struct _LOCALGROUP_MEMBERS_INFO_0 {
+// PSID lgrmi0_sid;
+// } LOCALGROUP_MEMBERS_INFO_0, *PLOCALGROUP_MEMBERS_INFO_0, *LPLOCALGROUP_MEMBERS_INFO_0;
+type LocalGroupMembersInfo0 struct {
+ Sid *windows.SID
+}
+
+// typedef struct _LOCALGROUP_INFO_1 {
+// LPWSTR lgrpi1_name;
+// LPWSTR lgrpi1_comment;
+// } LOCALGROUP_INFO_1, *PLOCALGROUP_INFO_1, *LPLOCALGROUP_INFO_1;
+type LocalGroupInfo1 struct {
+ Name *uint16
+ Comment *uint16
+}
+
+// typedef struct _USER_INFO_1 {
+// LPWSTR usri1_name;
+// LPWSTR usri1_password;
+// DWORD usri1_password_age;
+// DWORD usri1_priv;
+// LPWSTR usri1_home_dir;
+// LPWSTR usri1_comment;
+// DWORD usri1_flags;
+// LPWSTR usri1_script_path;
+// } USER_INFO_1, *PUSER_INFO_1, *LPUSER_INFO_1;
+type UserInfo1 struct {
+ Name *uint16
+ Password *uint16
+ PasswordAge uint32
+ Priv uint32
+ HomeDir *uint16
+ Comment *uint16
+ Flags uint32
+ ScriptPath *uint16
+}
+
+// NET_API_STATUS NET_API_FUNCTION NetLocalGroupGetInfo(
+// [in] LPCWSTR servername,
+// [in] LPCWSTR groupname,
+// [in] DWORD level,
+// [out] LPBYTE *bufptr
+// );
+//
+//sys netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) = netapi32.NetLocalGroupGetInfo
+
+// NetLocalGroupGetInfo is a slightly Go-friendlier wrapper around the NetLocalGroupGetInfo function. Instead of taking *uint16
+// pointers, it takes Go strings and does the conversion internally.
+func NetLocalGroupGetInfo(serverName, groupName string, level uint32, bufPtr **byte) (err error) {
+ var (
+ serverNameUTF16 *uint16
+ groupNameUTF16 *uint16
+ )
+ if serverName != "" {
+ serverNameUTF16, err = windows.UTF16PtrFromString(serverName)
+ if err != nil {
+ return err
+ }
+ }
+ if groupName != "" {
+ groupNameUTF16, err = windows.UTF16PtrFromString(groupName)
+ if err != nil {
+ return err
+ }
+ }
+ return netLocalGroupGetInfo(
+ serverNameUTF16,
+ groupNameUTF16,
+ level,
+ bufPtr,
+ )
+}
+
+// NET_API_STATUS NET_API_FUNCTION NetUserAdd(
+// [in] LPCWSTR servername,
+// [in] DWORD level,
+// [in] LPBYTE buf,
+// [out] LPDWORD parm_err
+// );
+//
+//sys netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) = netapi32.NetUserAdd
+
+// NetUserAdd is a slightly Go-friendlier wrapper around the NetUserAdd function. Instead of taking *uint16
+// pointers, it takes Go strings and does the conversion internally.
+func NetUserAdd(serverName string, level uint32, buf *byte, parm_err *uint32) (err error) {
+ var serverNameUTF16 *uint16
+ if serverName != "" {
+ serverNameUTF16, err = windows.UTF16PtrFromString(serverName)
+ if err != nil {
+ return err
+ }
+ }
+ return netUserAdd(
+ serverNameUTF16,
+ level,
+ buf,
+ parm_err,
+ )
+}
+
+// NET_API_STATUS NET_API_FUNCTION NetUserDel(
+// [in] LPCWSTR servername,
+// [in] LPCWSTR username
+// );
+//
+//sys netUserDel(serverName *uint16, username *uint16) (status error) = netapi32.NetUserDel
+
+// NetUserDel is a slightly Go-friendlier wrapper around the NetUserDel function. Instead of taking *uint16
+// pointers, it takes Go strings and does the conversion internally.
+func NetUserDel(serverName, userName string) (err error) {
+ var (
+ serverNameUTF16 *uint16
+ userNameUTF16 *uint16
+ )
+ if serverName != "" {
+ serverNameUTF16, err = windows.UTF16PtrFromString(serverName)
+ if err != nil {
+ return err
+ }
+ }
+ if userName != "" {
+ userNameUTF16, err = windows.UTF16PtrFromString(userName)
+ if err != nil {
+ return err
+ }
+ }
+ return netUserDel(
+ serverNameUTF16,
+ userNameUTF16,
+ )
+}
+
+// NET_API_STATUS NET_API_FUNCTION NetLocalGroupAddMembers(
+// [in] LPCWSTR servername,
+// [in] LPCWSTR groupname,
+// [in] DWORD level,
+// [in] LPBYTE buf,
+// [in] DWORD totalentries
+// );
+//
+//sys netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) = netapi32.NetLocalGroupAddMembers
+
+// NetLocalGroupAddMembers is a slightly Go-friendlier wrapper around the NetLocalGroupAddMembers function. Instead of taking *uint16
+// pointers, it takes Go strings and does the conversion internally.
+func NetLocalGroupAddMembers(serverName, groupName string, level uint32, buf *byte, totalEntries uint32) (err error) {
+ var (
+ serverNameUTF16 *uint16
+ groupNameUTF16 *uint16
+ )
+ if serverName != "" {
+ serverNameUTF16, err = windows.UTF16PtrFromString(serverName)
+ if err != nil {
+ return err
+ }
+ }
+ if groupName != "" {
+ groupNameUTF16, err = windows.UTF16PtrFromString(groupName)
+ if err != nil {
+ return err
+ }
+ }
+ return netLocalGroupAddMembers(
+ serverNameUTF16,
+ groupNameUTF16,
+ level,
+ buf,
+ totalEntries,
+ )
+}
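
The wrappers above keep the raw netapi32 level/buffer calling convention. A minimal sketch of creating a local account with information level 1 (USER_INFO_1); the account name, password handling, and flag choice are illustrative only:

package winapiexample

import (
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// addLocalUser creates a normal, non-expiring local account (an empty server
// name targets the local computer) using information level 1.
func addLocalUser(name, password string) error {
	nameW, err := windows.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	passW, err := windows.UTF16PtrFromString(password)
	if err != nil {
		return err
	}
	info := winapi.UserInfo1{
		Name:     nameW,
		Password: passW,
		Priv:     winapi.USER_PRIV_USER,
		Flags:    winapi.UF_NORMAL_ACCOUNT | winapi.UF_DONT_EXPIRE_PASSWD,
	}
	// The USER_INFO_1 buffer is passed as *byte, per the netapi32 convention.
	return winapi.NetUserAdd("", 1, (*byte)(unsafe.Pointer(&info)), nil)
}
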
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
index db59567d0..70c43fc1c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
@@ -1,8 +1,9 @@
+//go:build windows
+
package winapi
import (
"errors"
- "reflect"
"syscall"
"unsafe"
@@ -12,44 +13,45 @@ import (
// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice
// for easier interop with Go APIs
func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result))
- hdr.Data = uintptr(unsafe.Pointer(buffer))
- hdr.Cap = bufferLength
- hdr.Len = bufferLength
-
+ result = unsafe.Slice(buffer, bufferLength)
return
}
+// UnicodeString corresponds to the UNICODE_STRING Win32 struct, documented at
+// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string
type UnicodeString struct {
Length uint16
MaximumLength uint16
Buffer *uint16
}
-//String converts a UnicodeString to a golang string
+// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. It
+// denotes the maximum number of wide characters a path can have.
+const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767
+
+// String converts a UnicodeString to a Go string.
func (uni UnicodeString) String() string {
// UnicodeString is not guaranteed to be null terminated, therefore
// use the UnicodeString's Length field
- return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
+ return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
}
// NewUnicodeString allocates a new UnicodeString and copies `s` into
// the buffer of the new UnicodeString.
func NewUnicodeString(s string) (*UnicodeString, error) {
- // Get length of original `s` to use in the UnicodeString since the `buf`
- // created later will have an additional trailing null character
- length := len(s)
- if length > 32767 {
- return nil, syscall.ENAMETOOLONG
- }
-
buf, err := windows.UTF16FromString(s)
if err != nil {
return nil, err
}
+
+ if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH {
+ return nil, syscall.ENAMETOOLONG
+ }
+
uni := &UnicodeString{
- Length: uint16(length * 2),
- MaximumLength: uint16(length * 2),
+ // The length is in bytes and should not include the trailing null character.
+ Length: uint16((len(buf) - 1) * 2),
+ MaximumLength: uint16((len(buf) - 1) * 2),
Buffer: &buf[0],
}
return uni, nil
@@ -73,3 +75,9 @@ func ConvertStringSetToSlice(buf []byte) ([]string, error) {
}
return nil, errors.New("string set malformed: missing null terminator at end of buffer")
}
+
+// ParseUtf16LE parses a UTF-16LE byte array into a string (without passing
+// through a uint16 or rune array).
+func ParseUtf16LE(b []byte) string {
+ return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&b[0])))
+}
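
The utils.go rewrite replaces the reflect.SliceHeader idiom with unsafe.Slice and fixes NewUnicodeString so Length is derived from the UTF-16 encoding of the string (in bytes, excluding the terminating NUL) rather than from len(s), which miscounts non-ASCII input because it measures UTF-8 bytes, not UTF-16 code units. A round-trip sketch:

package winapiexample

import "github.com/Microsoft/hcsshim/internal/winapi"

// roundTrip encodes a Go string into a UNICODE_STRING and decodes it back.
// For a string of n UTF-16 code units, Length is 2n bytes, so String()
// slices exactly n code units back out of the buffer.
func roundTrip(s string) (string, error) {
	uni, err := winapi.NewUnicodeString(s)
	if err != nil {
		return "", err // e.g. ENAMETOOLONG past NTSTRSAFE_UNICODE_STRING_MAX_CCH
	}
	return uni.String(), nil
}
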
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
index ec88c0d21..6a90e3a69 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
@@ -1,5 +1,3 @@
-// Package winapi contains various low-level bindings to Windows APIs. It can
-// be thought of as an extension to golang.org/x/sys/windows.
package winapi
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
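
The //go:generate line now drives github.com/Microsoft/go-winio/tools/mkwinsyscall over every file in the package rather than a hand-maintained file list, so newly added files such as offlinereg.go and user.go are picked up automatically. To illustrate the pairing, here is a directive that actually appears in offlinereg.go above, together with the stub it expands to (reproduced from the zsyscall_windows.go diff below):

// Input directive, offlinereg.go:
//sys ORCloseHive(handle ORHKey) (win32err error) = offreg.ORCloseHive

// Output stub emitted into zsyscall_windows.go:
func ORCloseHive(handle ORHKey) (win32err error) {
	r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}
	return
}
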
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index 2941b0f98..ecdded312 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package winapi
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,255 +29,357 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
var (
- modntdll = windows.NewLazySystemDLL("ntdll.dll")
- modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
- modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
- modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
- modpsapi = windows.NewLazySystemDLL("psapi.dll")
- modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll")
+ modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
+ modcimfs = windows.NewLazySystemDLL("cimfs.dll")
+ modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modnetapi32 = windows.NewLazySystemDLL("netapi32.dll")
+ modntdll = windows.NewLazySystemDLL("ntdll.dll")
+ modoffreg = windows.NewLazySystemDLL("offreg.dll")
- procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
+ procLogonUserW = modadvapi32.NewProc("LogonUserW")
+ procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter")
+ procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW")
+ procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
+ procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
+ procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
+ procCimCloseImage = modcimfs.NewProc("CimCloseImage")
+ procCimCloseStream = modcimfs.NewProc("CimCloseStream")
+ procCimCommitImage = modcimfs.NewProc("CimCommitImage")
+ procCimCreateAlternateStream = modcimfs.NewProc("CimCreateAlternateStream")
+ procCimCreateFile = modcimfs.NewProc("CimCreateFile")
+ procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink")
+ procCimCreateImage = modcimfs.NewProc("CimCreateImage")
+ procCimDeletePath = modcimfs.NewProc("CimDeletePath")
+ procCimDismountImage = modcimfs.NewProc("CimDismountImage")
+ procCimMountImage = modcimfs.NewProc("CimMountImage")
+ procCimWriteStream = modcimfs.NewProc("CimWriteStream")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
- procSearchPathW = modkernel32.NewProc("SearchPathW")
+ procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
+ procCopyFileW = modkernel32.NewProc("CopyFileW")
+ procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
- procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
- procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
- procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject")
- procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject")
- procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject")
- procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject")
- procLogonUserW = modadvapi32.NewProc("LogonUserW")
- procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
- procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet")
- procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
- procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
- procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
- procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
- procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
- procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW")
+ procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
+ procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
+ procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject")
+ procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
+ procSearchPathW = modkernel32.NewProc("SearchPathW")
+ procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject")
+ procNetLocalGroupAddMembers = modnetapi32.NewProc("NetLocalGroupAddMembers")
+ procNetLocalGroupGetInfo = modnetapi32.NewProc("NetLocalGroupGetInfo")
+ procNetUserAdd = modnetapi32.NewProc("NetUserAdd")
+ procNetUserDel = modnetapi32.NewProc("NetUserDel")
procNtCreateFile = modntdll.NewProc("NtCreateFile")
- procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
+ procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject")
procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject")
+ procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject")
procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject")
+ procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
+ procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
+ procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
+ procORCloseHive = modoffreg.NewProc("ORCloseHive")
+ procORCloseKey = modoffreg.NewProc("ORCloseKey")
+ procORCreateHive = modoffreg.NewProc("ORCreateHive")
+ procORCreateKey = modoffreg.NewProc("ORCreateKey")
+ procORDeleteKey = modoffreg.NewProc("ORDeleteKey")
+ procORGetValue = modoffreg.NewProc("ORGetValue")
+ procORMergeHives = modoffreg.NewProc("ORMergeHives")
+ procOROpenHive = modoffreg.NewProc("OROpenHive")
+ procOROpenKey = modoffreg.NewProc("OROpenKey")
+ procORSaveHive = modoffreg.NewProc("ORSaveHive")
+ procORSetValue = modoffreg.NewProc("ORSetValue")
)
-func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
- r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
- status = uint32(r0)
+func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLogonUserW.Addr(), uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
return
}
-func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) {
- r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0)
- if r0 != 0 {
- win32Err = syscall.Errno(r0)
+func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) {
+ hr = procBfSetupFilter.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procBfSetupFilter.Addr(), uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
-func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
- size = uint32(r0)
- if size == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
+ r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_PropertyW.Addr(), uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0)
- handle = windows.Handle(r0)
- if handle == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_ListA.Addr(), uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_List_SizeA.Addr(), uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
- r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(pDeviceID)
+ if hr != nil {
+ return
+ }
+ return _CMLocateDevNode(pdnDevInst, _p0, uFlags)
+}
+
+func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
+ r0, _, _ := syscall.SyscallN(procCM_Locate_DevNodeW.Addr(), uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func CimCloseImage(cimFSHandle FsHandle) (err error) {
+ err = procCimCloseImage.Find()
+ if err != nil {
+ return
}
+ syscall.SyscallN(procCimCloseImage.Addr(), uintptr(cimFSHandle))
return
}
-func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
- var _p0 uint32
- if inheritHandle {
- _p0 = 1
- } else {
- _p0 = 0
+func CimCloseStream(cimStreamHandle StreamHandle) (hr error) {
+ hr = procCimCloseStream.Find()
+ if hr != nil {
+ return
}
- r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
- handle = windows.Handle(r0)
- if handle == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+ r0, _, _ := syscall.SyscallN(procCimCloseStream.Addr(), uintptr(cimStreamHandle))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0)
- ret = uint32(r0)
- if ret == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CimCommitImage(cimFSHandle FsHandle) (hr error) {
+ hr = procCimCommitImage.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimCommitImage.Addr(), uintptr(cimFSHandle))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0)
- ret = uint32(r0)
- if ret == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
+func CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
}
- return
+ return _CimCreateAlternateStream(cimFSHandle, _p0, size, cimStreamHandle)
}
-func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
- r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
- status = uint32(r0)
+func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, cimStreamHandle *StreamHandle) (hr error) {
+ hr = procCimCreateAlternateStream.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimCreateAlternateStream.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
return
}
-func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
- r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
- status = uint32(r0)
- return
+func CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _CimCreateFile(cimFSHandle, _p0, file, cimStreamHandle)
}
-func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
- r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) {
+ hr = procCimCreateFile.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimCreateFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(newPath)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(oldPath)
+ if hr != nil {
+ return
+ }
+ return _CimCreateHardLink(cimFSHandle, _p0, _p1)
+}
+
+func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) {
+ hr = procCimCreateHardLink.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimCreateHardLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func LocalAlloc(flags uint32, size int) (ptr uintptr) {
- r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
- ptr = uintptr(r0)
- return
+func CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(imagePath)
+ if hr != nil {
+ return
+ }
+ return _CimCreateImage(_p0, oldFSName, newFSName, cimFSHandle)
}
-func LocalFree(ptr uintptr) {
- syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
+func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
+ hr = procCimCreateImage.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimCreateImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
return
}
-func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _CimDeletePath(cimFSHandle, _p0)
+}
+
+func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) {
+ hr = procCimDeletePath.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimDeletePath.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
- size = uint32(r0)
- if size == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
+func CimDismountImage(volumeID *g) (hr error) {
+ hr = procCimDismountImage.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimDismountImage.Addr(), uintptr(unsafe.Pointer(volumeID)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
}
+ hr = syscall.Errno(r0)
}
return
}
-func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
- r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
- amount = uint32(r0)
- return
+func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(imagePath)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(fsName)
+ if hr != nil {
+ return
+ }
+ return _CimMountImage(_p0, _p1, flags, volumeID)
}
-func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
+func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g) (hr error) {
+ hr = procCimMountImage.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimMountImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -284,8 +389,12 @@ func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr e
return
}
-func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0)
+func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) {
+ hr = procCimWriteStream.Find()
+ if hr != nil {
+ return
+ }
+ r0, _, _ := syscall.SyscallN(procCimWriteStream.Addr(), uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -295,17 +404,29 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u
return
}
-func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) {
- var _p0 *uint16
- _p0, hr = syscall.UTF16PtrFromString(pDeviceID)
- if hr != nil {
- return
+func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) {
+ r0, _, _ := syscall.SyscallN(procSetJobCompartmentId.Addr(), uintptr(handle), uintptr(compartmentId))
+ if r0 != 0 {
+ win32Err = syscall.Errno(r0)
}
- return _CMLocateDevNode(pdnDevInst, _p0, uFlags)
+ return
}
-func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
+func ClosePseudoConsole(hpc windows.Handle) {
+ syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(hpc))
+ return
+}
+
+func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procCopyFileW.Addr(), uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
+ r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -315,8 +436,72 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr
return
}
-func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
- r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
+func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateRemoteThread.Addr(), uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)))
+ handle = windows.Handle(r0)
+ if handle == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
+ r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
+ amount = uint32(r0)
+ return
+}
+
+func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procIsProcessInJob.Addr(), uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func LocalAlloc(flags uint32, size int) (ptr uintptr) {
+ r0, _, _ := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(size))
+ ptr = uintptr(r0)
+ return
+}
+
+func LocalFree(ptr uintptr) {
+ syscall.SyscallN(procLocalFree.Addr(), uintptr(ptr))
+ return
+}
+
+func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
+ var _p0 uint32
+ if inheritHandle {
+ _p0 = 1
+ }
+ r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
+ handle = windows.Handle(r0)
+ if handle == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) {
+ r0, _, e1 := syscall.SyscallN(procQueryIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)))
+ ret = uint32(r0)
+ if ret == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
+ r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(hPc), uintptr(size))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@@ -326,20 +511,76 @@ func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyTyp
return
}
+func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) {
+ r0, _, e1 := syscall.SyscallN(procSearchPathW.Addr(), uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
+ size = uint32(r0)
+ if size == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
+ r0, _, e1 := syscall.SyscallN(procSetIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)))
+ ret = uint32(r0)
+ if ret == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) {
+ r0, _, _ := syscall.SyscallN(procNetLocalGroupAddMembers.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries))
+ if r0 != 0 {
+ status = syscall.Errno(r0)
+ }
+ return
+}
+
+func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) {
+ r0, _, _ := syscall.SyscallN(procNetLocalGroupGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)))
+ if r0 != 0 {
+ status = syscall.Errno(r0)
+ }
+ return
+}
+
+func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) {
+ r0, _, _ := syscall.SyscallN(procNetUserAdd.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)))
+ if r0 != 0 {
+ status = syscall.Errno(r0)
+ }
+ return
+}
+
+func netUserDel(serverName *uint16, username *uint16) (status error) {
+ r0, _, _ := syscall.SyscallN(procNetUserDel.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)))
+ if r0 != 0 {
+ status = syscall.Errno(r0)
+ }
+ return
+}
+
func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
- r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength))
status = uint32(r0)
return
}
-func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
+func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
+ r0, _, _ := syscall.SyscallN(procNtCreateJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
status = uint32(r0)
return
}
func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) {
- r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
+ r0, _, _ := syscall.SyscallN(procNtOpenDirectoryObject.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
+ status = uint32(r0)
+ return
+}
+
+func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
+ r0, _, _ := syscall.SyscallN(procNtOpenJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
status = uint32(r0)
return
}
@@ -348,24 +589,198 @@ func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleE
var _p0 uint32
if singleEntry {
_p0 = 1
- } else {
- _p0 = 0
}
var _p1 uint32
if restartScan {
_p1 = 1
- } else {
- _p1 = 0
}
- r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtQueryDirectoryObject.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)))
+ status = uint32(r0)
+ return
+}
+
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
+ r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)))
+ status = uint32(r0)
+ return
+}
+
+func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
+ r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)))
+ status = uint32(r0)
+ return
+}
+
+func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
+ r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class))
status = uint32(r0)
return
}
func RtlNtStatusToDosError(status uint32) (winerr error) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosError.Addr(), uintptr(status))
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
+
+func ORCloseHive(handle ORHKey) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORCloseKey(handle ORHKey) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORCloseKey.Addr(), uintptr(handle))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORCreateHive(key *ORHKey) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORCreateHive.Addr(), uintptr(unsafe.Pointer(key)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(subKey)
+ if win32err != nil {
+ return
+ }
+ return _ORCreateKey(handle, _p0, class, options, securityDescriptor, result, disposition)
+}
+
+func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORCreateKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORDeleteKey(handle ORHKey, subKey string) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(subKey)
+ if win32err != nil {
+ return
+ }
+ return _ORDeleteKey(handle, _p0)
+}
+
+func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORDeleteKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(subKey)
+ if win32err != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, win32err = syscall.UTF16PtrFromString(value)
+ if win32err != nil {
+ return
+ }
+ return _ORGetValue(handle, _p0, _p1, valueType, data, dataLen)
+}
+
+func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORGetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) {
+ var _p0 *ORHKey
+ if len(hiveHandles) > 0 {
+ _p0 = &hiveHandles[0]
+ }
+ r0, _, _ := syscall.SyscallN(procORMergeHives.Addr(), uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func OROpenHive(hivePath string, result *ORHKey) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(hivePath)
+ if win32err != nil {
+ return
+ }
+ return _OROpenHive(_p0, result)
+}
+
+func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procOROpenHive.Addr(), uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(subKey)
+ if win32err != nil {
+ return
+ }
+ return _OROpenKey(handle, _p0, result)
+}
+
+func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procOROpenKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(hivePath)
+ if win32err != nil {
+ return
+ }
+ return _ORSaveHive(handle, _p0, osMajorVersion, osMinorVersion)
+}
+
+func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORSaveHive.Addr(), uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
+
+func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) {
+ var _p0 *uint16
+ _p0, win32err = syscall.UTF16PtrFromString(valueName)
+ if win32err != nil {
+ return
+ }
+ return _ORSetValue(handle, _p0, valueType, data, dataLen)
+}
+
+func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) {
+ r0, _, _ := syscall.SyscallN(procORSetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go
index 891616370..7e9c9fbbe 100644
--- a/vendor/github.com/Microsoft/hcsshim/layer.go
+++ b/vendor/github.com/Microsoft/hcsshim/layer.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
@@ -30,6 +32,7 @@ func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPa
func DeactivateLayer(info DriverInfo, id string) error {
return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id))
}
+
func DestroyLayer(info DriverInfo, id string) error {
return wclayer.DestroyLayer(context.Background(), layerPath(&info, id))
}
@@ -68,6 +71,9 @@ func ProcessUtilityVMImage(path string) error {
func UnprepareLayer(info DriverInfo, layerId string) error {
return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId))
}
+func ConvertToBaseLayer(path string) error {
+ return wclayer.ConvertToBaseLayer(context.Background(), path)
+}
type DriverInfo struct {
Flavour int
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
index 3ab3bcd89..3227ebe89 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
@@ -5,6 +5,7 @@ import (
"sync"
"golang.org/x/sys/windows"
+ "golang.org/x/sys/windows/registry"
)
// OSVersion is a wrapper for Windows version information
@@ -25,16 +26,15 @@ var (
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
once.Do(func() {
- var err error
+ v := *windows.RtlGetVersion()
osv = OSVersion{}
- osv.Version, err = windows.GetVersion()
- if err != nil {
- // GetVersion never fails.
- panic(err)
- }
- osv.MajorVersion = uint8(osv.Version & 0xFF)
- osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
- osv.Build = uint16(osv.Version >> 16)
+ osv.MajorVersion = uint8(v.MajorVersion)
+ osv.MinorVersion = uint8(v.MinorVersion)
+ osv.Build = uint16(v.BuildNumber)
+ // Fill version value so that existing clients don't break
+ osv.Version = v.BuildNumber << 16
+ osv.Version = osv.Version | (uint32(v.MinorVersion) << 8)
+ osv.Version = osv.Version | v.MajorVersion
})
return osv
}
@@ -45,6 +45,30 @@ func Build() uint16 {
return Get().Build
}
-func (osv OSVersion) ToString() string {
+// String returns the OSVersion formatted as a string. It implements the
+// [fmt.Stringer] interface.
+func (osv OSVersion) String() string {
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
}
+
+// ToString returns the OSVersion formatted as a string.
+//
+// Deprecated: use [OSVersion.String].
+func (osv OSVersion) ToString() string {
+ return osv.String()
+}
+
+// Running `cmd /c ver` shows something like "10.0.20348.1000". The last
+// component ("1000") is the revision number.
+func BuildRevision() (uint32, error) {
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+ if err != nil {
+ return 0, fmt.Errorf("open `CurrentVersion` registry key: %w", err)
+ }
+ defer k.Close()
+ s, _, err := k.GetIntegerValue("UBR")
+ if err != nil {
+ return 0, fmt.Errorf("read `UBR` from registry: %w", err)
+ }
+ return uint32(s), nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go
new file mode 100644
index 000000000..f8d411ad7
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go
@@ -0,0 +1,35 @@
+package osversion
+
+// List of stable ABI compliant ltsc releases
+// Note: List must be sorted in ascending order
+var compatLTSCReleases = []uint16{
+ V21H2Server,
+}
+
+// CheckHostAndContainerCompat checks if given host and container
+// OS versions are compatible.
+// It includes support for stable ABI compliant versions as well.
+// Every release after WS 2022 will support the previous ltsc
+// container image. Stable ABI support is in preview mode for the Windows 11 client.
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility
+func CheckHostAndContainerCompat(host, ctr OSVersion) bool {
+ // check major minor versions of host and guest
+ if host.MajorVersion != ctr.MajorVersion ||
+ host.MinorVersion != ctr.MinorVersion {
+ return false
+ }
+
+ // If host is < WS 2022, exact version match is required
+ if host.Build < V21H2Server {
+ return host.Build == ctr.Build
+ }
+
+ var supportedLtscRelease uint16
+ for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
+ if host.Build >= compatLTSCReleases[i] {
+ supportedLtscRelease = compatLTSCReleases[i]
+ break
+ }
+ }
+ return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
+}
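
To see how these additions fit together, here is a minimal, illustrative sketch (not part of the vendored code) that combines `Get`, the new `String` method, `BuildRevision`, the LTSC build aliases, and `CheckHostAndContainerCompat`; it builds only for Windows, since the package is gated on that target:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	host := osversion.Get()
	fmt.Println("host:", host) // OSVersion now prints as "major.minor.build" via String()

	if rev, err := osversion.BuildRevision(); err == nil {
		fmt.Println("revision:", rev) // the UBR value, e.g. 1000 in "10.0.20348.1000"
	}

	// A hypothetical container image built against Windows Server 2019 (ltsc2019).
	ctr := osversion.OSVersion{MajorVersion: 10, MinorVersion: 0, Build: osversion.LTSC2019}

	// On a Windows Server 2022 host this prints false: the oldest stable-ABI
	// LTSC release supported there is ltsc2022.
	fmt.Println("compatible:", osversion.CheckHostAndContainerCompat(host, ctr))
}
```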
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
index e9267b955..446369591 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -1,38 +1,84 @@
package osversion
+// Windows Client and Server build numbers.
+//
+// See:
+// https://learn.microsoft.com/en-us/windows/release-health/release-information
+// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info
+// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
const (
// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
RS1 = 14393
+ // V1607 (version 1607, codename "Redstone 1") is an alias for [RS1].
+ V1607 = RS1
+ // LTSC2016 (Windows Server 2016) is an alias for [RS1].
+ LTSC2016 = RS1
// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
// corresponds to Windows 10 (Creators Update).
RS2 = 15063
+ // V1703 (version 1703, codename "Redstone 2") is an alias for [RS2].
+ V1703 = RS2
// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
RS3 = 16299
+ // V1709 (version 1709, codename "Redstone 3") is an alias for [RS3].
+ V1709 = RS3
// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
RS4 = 17134
+ // V1803 (version 1803, codename "Redstone 4") is an alias for [RS4].
+ V1803 = RS4
// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
RS5 = 17763
+ // V1809 (version 1809, codename "Redstone 5") is an alias for [RS5].
+ V1809 = RS5
+ // LTSC2019 (Windows Server 2019) is an alias for [RS5].
+ LTSC2019 = RS5
- // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
+ // V19H1 (version 1903, codename 19H1) corresponds to Windows Server 1903 (semi-annual
// channel).
V19H1 = 18362
+ // V1903 (version 1903) is an alias for [V19H1].
+ V1903 = V19H1
- // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual
+ // V19H2 (version 1909, codename 19H2) corresponds to Windows Server 1909 (semi-annual
// channel).
V19H2 = 18363
+ // V1909 (version 1909) is an alias for [V19H2].
+ V1909 = V19H2
- // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual
+ // V20H1 (version 2004, codename 20H1) corresponds to Windows Server 2004 (semi-annual
// channel).
V20H1 = 19041
+ // V2004 (version 2004) is an alias for [V20H1].
+ V2004 = V20H1
// V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
V20H2 = 19042
+
+ // V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
+ V21H1 = 19043
+
+ // V21H2Win10 corresponds to Windows 10 (November 2021 Update).
+ V21H2Win10 = 19044
+
+ // V21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ V21H2Server = 20348
+ // LTSC2022 (Windows Server 2022) is an alias for [V21H2Server].
+ LTSC2022 = V21H2Server
+
+ // V21H2Win11 corresponds to Windows 11 (original release).
+ V21H2Win11 = 22000
+
+ // V22H2Win10 corresponds to Windows 10 (2022 Update).
+ V22H2Win10 = 19045
+
+ // V22H2Win11 corresponds to Windows 11 (2022 Update).
+ V22H2Win11 = 22621
)
diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go
index 3362c6833..44df91cde 100644
--- a/vendor/github.com/Microsoft/hcsshim/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/process.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package hcsshim
import (
diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
index 8bed84857..e43d59a40 100644
--- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated mksyscall_windows.exe DO NOT EDIT
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package hcsshim
@@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -26,13 +29,10 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
- return nil
+ return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
- // TODO: add more here, after collecting data on the common
- // error values see on Windows. (perhaps when running
- // all.bat?)
return e
}
@@ -43,7 +43,7 @@ var (
)
func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetCurrentThreadCompartmentId.Addr(), uintptr(compartmentId))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore
index 6c7104aef..c66769f6c 100644
--- a/vendor/github.com/VividCortex/ewma/.gitignore
+++ b/vendor/github.com/VividCortex/ewma/.gitignore
@@ -1,2 +1,3 @@
.DS_Store
.*.sw?
+/coverage.txt
\ No newline at end of file
diff --git a/vendor/github.com/VividCortex/ewma/.whitesource b/vendor/github.com/VividCortex/ewma/.whitesource
new file mode 100644
index 000000000..d7eebc0cf
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/.whitesource
@@ -0,0 +1,3 @@
+{
+ "settingsInheritedFrom": "VividCortex/whitesource-config@master"
+}
\ No newline at end of file
diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md
index 7aab61b87..87b4a3c7e 100644
--- a/vendor/github.com/VividCortex/ewma/README.md
+++ b/vendor/github.com/VividCortex/ewma/README.md
@@ -1,4 +1,8 @@
-# EWMA [](https://godoc.org/github.com/VividCortex/ewma) 
+# EWMA
+
+[](https://godoc.org/github.com/VividCortex/ewma)
+
+[](https://codecov.io/gh/VividCortex/ewma)
This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).
@@ -103,23 +107,24 @@ View the GoDoc generated documentation [here](http://godoc.org/github.com/VividC
```go
package main
+
import "github.com/VividCortex/ewma"
func main() {
- samples := [100]float64{
- 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
- }
+ samples := [100]float64{
+ 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
+ }
- e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
- a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
+ e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
+ a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
- for _, f := range samples {
- e.Add(f)
- a.Add(f)
- }
+ for _, f := range samples {
+ e.Add(f)
+ a.Add(f)
+ }
- e.Value() //=> 13.577404704631077
- a.Value() //=> 1.5806140565521463e-12
+ e.Value() //=> 13.577404704631077
+ a.Value() //=> 1.5806140565521463e-12
}
```
diff --git a/vendor/github.com/VividCortex/ewma/codecov.yml b/vendor/github.com/VividCortex/ewma/codecov.yml
new file mode 100644
index 000000000..0d36d903f
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/codecov.yml
@@ -0,0 +1,6 @@
+coverage:
+ status:
+ project:
+ default:
+ threshold: 15%
+ patch: off
diff --git a/vendor/github.com/VividCortex/ewma/go.mod b/vendor/github.com/VividCortex/ewma/go.mod
new file mode 100644
index 000000000..e481e3dfb
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/go.mod
@@ -0,0 +1,3 @@
+module github.com/VividCortex/ewma
+
+go 1.12
diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore
new file mode 100644
index 000000000..8d69a9418
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.gitignore
@@ -0,0 +1,15 @@
+bin/
+.idea/
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 000000000..bb83c6670
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+dist: xenial
+go:
+ - '1.10'
+ - '1.11'
+ - '1.12'
+ - '1.13'
+ - 'tip'
+
+script:
+ - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..4b462b0d8
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Contributor Code of Conduct
+
+This project adheres to [The Code Manifesto](http://codemanifesto.com)
+as its guidelines for contributor interactions.
+
+## The Code Manifesto
+
+We want to work in an ecosystem that empowers developers to reach their
+potential — one that encourages growth and effective collaboration. A space
+that is safe for all.
+
+A space such as this benefits everyone that participates in it. It encourages
+new developers to enter our field. It is through discussion and collaboration
+that we grow, and through growth that we improve.
+
+In the effort to create such a place, we hold to these values:
+
+1. **Discrimination limits us.** This includes discrimination on the basis of
+ race, gender, sexual orientation, gender identity, age, nationality,
+ technology and any other arbitrary exclusion of a group of people.
+2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
+ levels. Remember that, and if brought to your attention, heed it.
+3. **We are our biggest assets.** None of us were born masters of our trade.
+ Each of us has been helped along the way. Return that favor, when and where
+ you can.
+4. **We are resources for the future.** As an extension of #3, share what you
+ know. Make yourself a resource to help those that come after you.
+5. **Respect defines us.** Treat others as you wish to be treated. Make your
+ discussions, criticisms and debates from a position of respectfulness. Ask
+ yourself, is it true? Is it necessary? Is it constructive? Anything less is
+ unacceptable.
+6. **Reactions require grace.** Angry responses are valid, but abusive language
+ and vindictive actions are toxic. When something happens that offends you,
+ handle it assertively, but be respectful. Escalate reasonably, and try to
+ allow the offender an opportunity to explain themselves, and possibly
+ correct the issue.
+7. **Opinions are just that: opinions.** Each and every one of us, due to our
+ background and upbringing, have varying opinions. That is perfectly
+ acceptable. Remember this: if you respect your own opinions, you should
+ respect the opinions of others.
+8. **To err is human.** You might not intend it, but mistakes do happen and
+ contribute to build experience. Tolerate honest mistakes, and don't
+ hesitate to apologize if you make one yourself.
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 000000000..7ed268a1e
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to work on, here are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 000000000..cacba9102
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2020 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 000000000..2c3fc35eb
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,622 @@
+govalidator
+===========
+[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator)
+[](https://travis-ci.org/asaskevich/govalidator)
+[](https://codecov.io/gh/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you'd rather not type out the long `govalidator` name, you can alias the import:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between the `nil` and `zero value` states can use it. When disabled, both `nil` and `zero` values cause validation errors (see the sketch at the end of this section).
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address, but not when it's empty:
+type exampleStruct3 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
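+
+`SetNilPtrAllowedByRequired` is toggled the same way; a minimal sketch with a hypothetical `Account` struct:
+```go
+import "github.com/asaskevich/govalidator"
+
+type Account struct {
+ // with SetNilPtrAllowedByRequired(true), a nil *string passes `required`;
+ // with the default (false), nil fails it just like the empty string
+ Nickname *string `valid:"required"`
+}
+
+func init() {
+ govalidator.SetNilPtrAllowedByRequired(true)
+}
+```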
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs this is the object being validated, which makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
+ // ...
+}
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
+ // ...
+})
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func ErrorsByField(e error) map[string]string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func HasLowerCase(str string) bool
+func HasUpperCase(str string) bool
+func HasWhitespace(str string) bool
+func HasWhitespaceOnly(str string) bool
+func InRange(value interface{}, left interface{}, right interface{}) bool
+func InRangeFloat32(value, left, right float32) bool
+func InRangeFloat64(value, left, right float64) bool
+func InRangeInt(value, left, right interface{}) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCIDR(str string) bool
+func IsCRC32(str string) bool
+func IsCRC32b(str string) bool
+func IsCreditCard(str string) bool
+func IsDNSName(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsExistingEmail(email string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHash(str string, algorithm string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsHost(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsISO4217(str string) bool
+func IsISO693Alpha2(str string) bool
+func IsISO693Alpha3b(str string) bool
+func IsIn(str string, params ...string) bool
+func IsInRaw(str string, params ...string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMD4(str string) bool
+func IsMD5(str string) bool
+func IsMagnetURI(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNotNull(str string) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRFC3339(str string) bool
+func IsRFC3339WithoutZone(str string) bool
+func IsRGBcolor(str string) bool
+func IsRegex(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsRipeMD128(str string) bool
+func IsRipeMD160(str string) bool
+func IsRsaPub(str string, params ...string) bool
+func IsRsaPublicKey(str string, keylen int) bool
+func IsSHA1(str string) bool
+func IsSHA256(str string) bool
+func IsSHA384(str string) bool
+func IsSHA512(str string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsTiger128(str string) bool
+func IsTiger160(str string) bool
+func IsTiger192(str string) bool
+func IsTime(str string, format string) bool
+func IsType(v interface{}, params ...string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsULID(str string) bool
+func IsUnixTime(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func MaxStringLength(str string, params ...string) bool
+func MinStringLength(str string, params ...string) bool
+func NormalizeEmail(str string) (string, error)
+func PadBoth(str string, padStr string, padLen int) string
+func PadLeft(str string, padStr string, padLen int) string
+func PadRight(str string, padStr string, padLen int) string
+func PrependPathToErrors(err error, path string) error
+func Range(str string, params ...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func SetNilPtrAllowedByRequired(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (res int64, err error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func TruncatingErrorf(str string, args ...interface{}) error
+func UnderscoreToCamelCase(s string) string
+func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type ISO693Entry
+type InterfaceParamValidator
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### IsType
+```go
+println(govalidator.IsType("Bob", "string"))
+println(govalidator.IsType(1, "int"))
+i := 1
+println(govalidator.IsType(&i, "*int"))
+```
+
+IsType can be used through the tag `type` which is essential for map validation:
+```go
+type User struct {
+ Name string `valid:"type(string)"`
+ Age int `valid:"type(int)"`
+ Meta interface{} `valid:"type(string)"`
+}
+result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ToString
+```go
+type User struct {
+ FirstName string
+ LastName string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+ println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+ return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+ return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. All validators used with a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator tag - function used):
+```go
+"email": IsEmail,
+"url": IsURL,
+"dialstring": IsDialString,
+"requrl": IsRequestURL,
+"requri": IsRequestURI,
+"alpha": IsAlpha,
+"utfletter": IsUTFLetter,
+"alphanum": IsAlphanumeric,
+"utfletternum": IsUTFLetterNumeric,
+"numeric": IsNumeric,
+"utfnumeric": IsUTFNumeric,
+"utfdigit": IsUTFDigit,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"rgbcolor": IsRGBcolor,
+"lowercase": IsLowerCase,
+"uppercase": IsUpperCase,
+"int": IsInt,
+"float": IsFloat,
+"null": IsNull,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"creditcard": IsCreditCard,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"multibyte": IsMultibyte,
+"ascii": IsASCII,
+"printableascii": IsPrintableASCII,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"variablewidth": IsVariableWidth,
+"base64": IsBase64,
+"datauri": IsDataURI,
+"ip": IsIP,
+"port": IsPort,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"dns": IsDNSName,
+"host": IsHost,
+"mac": IsMAC,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"ssn": IsSSN,
+"semver": IsSemver,
+"rfc3339": IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2": IsISO3166Alpha2,
+"ISO3166Alpha3": IsISO3166Alpha3,
+"ulid": IsULID,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)" : IsRsaPub,
+"minstringlength(int): MinStringLength,
+"maxstringlength(int): MaxStringLength,
+```
+Validators with parameters for any type
+
+```go
+"type(type)": IsType,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+ Title string `valid:"alphanum,required"`
+ Message string `valid:"duck,ascii"`
+ Message2 string `valid:"animal(dog)"`
+ AuthorIP string `valid:"ipv4"`
+ Date string `valid:"-"`
+}
+post := &Post{
+ Title: "My Example Post",
+ Message: "duck",
+ Message2: "dog",
+ AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+ species := params[0]
+ return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)
+If you want to validate maps, you can pass the map to be validated together with a validation map that contains the same tags used in ValidateStruct; both maps have to be of the form `map[string]interface{}`.
+
+Here is a small example of usage:
+```go
+var mapTemplate = map[string]interface{}{
+ "name":"required,alpha",
+ "family":"required,alpha",
+ "email":"required,email",
+ "cell-phone":"numeric",
+ "address":map[string]interface{}{
+ "line1":"required,alphanum",
+ "line2":"alphanum",
+ "postal-code":"numeric",
+ },
+}
+
+var inputMap = map[string]interface{}{
+ "name":"Bob",
+ "family":"Smith",
+ "email":"foo@bar.baz",
+ "address":map[string]interface{}{
+ "line1":"",
+ "line2":"",
+ "postal-code":"",
+ },
+}
+
+result, err := govalidator.ValidateMap(inputMap, mapTemplate)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+
+###### WhiteList
+```go
+// Remove all characters from the string except those between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+ ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+ Email string `valid:"email"`
+ CustomMinLength int `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // you can type switch on the context interface being validated
+ case StructWithCustomByteArray:
+ // you can check and validate against some other field in the context,
+ // return early or not validate against the context at all – your choice
+ case SomeOtherType:
+ // ...
+ default:
+ // expecting some other type? Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+})
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+})
+```
+
+###### Loop over Error()
+By default, .Error() returns all errors in a single string. To access each error individually, you can do this:
+```go
+ if err != nil {
+ errs := err.(govalidator.Errors).Errors()
+ for _, e := range errs {
+ fmt.Println(e.Error())
+ }
+ }
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+ Id int64 `json:"id"`
+ FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to work on, here are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 000000000..3e1da7cb4
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,87 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index.
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index and returns a result.
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean.
+type ConditionIterator func(interface{}, int) bool
+
+// ReduceIterator is a function that accepts two elements of a slice/array and returns the result of merging those values.
+type ReduceIterator func(interface{}, interface{}) interface{}
+
+// Some returns true if at least one item of the array satisfies the ConditionIterator.
+func Some(array []interface{}, iterator ConditionIterator) bool {
+ res := false
+ for index, data := range array {
+ res = res || iterator(data, index)
+ }
+ return res
+}
+
+// Every returns true only if every item of the array satisfies the ConditionIterator.
+func Every(array []interface{}, iterator ConditionIterator) bool {
+ res := true
+ for index, data := range array {
+ res = res && iterator(data, index)
+ }
+ return res
+}
+
+// Reduce boils down a list of values into a single value by applying the ReduceIterator.
+func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
+ for _, data := range array {
+ initialValue = iterator(initialValue, data)
+ }
+ return initialValue
+}
+
+// Each iterates over the slice and applies the Iterator to every item.
+func Each(array []interface{}, iterator Iterator) {
+ for index, data := range array {
+ iterator(data, index)
+ }
+}
+
+// Map iterates over the slice and applies the ResultIterator to every item. Returns a new slice as the result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+ var result = make([]interface{}, len(array))
+ for index, data := range array {
+ result[index] = iterator(data, index)
+ }
+ return result
+}
+
+// Find iterates over the slice and applies the ConditionIterator to every item. Returns the first item that meets the ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+ for index, data := range array {
+ if iterator(data, index) {
+ return data
+ }
+ }
+ return nil
+}
+
+// Filter iterates over the slice and applies the ConditionIterator to every item. Returns a new slice with the matching items.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+ var result = make([]interface{}, 0)
+ for index, data := range array {
+ if iterator(data, index) {
+ result = append(result, data)
+ }
+ }
+ return result
+}
+
+// Count iterates over the slice and applies the ConditionIterator to every item. Returns the count of items that meet the ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+ count := 0
+ for index, data := range array {
+ if iterator(data, index) {
+ count = count + 1
+ }
+ }
+ return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 000000000..d68e990fc
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,81 @@
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+ res := fmt.Sprintf("%v", obj)
+ return res
+}
+
+// ToJSON converts the input to a valid JSON string.
+func ToJSON(obj interface{}) (string, error) {
+ res, err := json.Marshal(obj)
+ if err != nil {
+ res = []byte("")
+ }
+ return string(res), err
+}
+
+// ToFloat converts the input to a float64, or 0.0 if the input is not numeric.
+func ToFloat(value interface{}) (res float64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = float64(val.Int())
+ case uint, uint8, uint16, uint32, uint64:
+ res = float64(val.Uint())
+ case float32, float64:
+ res = val.Float()
+ case string:
+ res, err = strconv.ParseFloat(val.String(), 64)
+ if err != nil {
+ res = 0
+ }
+ default:
+ err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToInt converts the input string or any numeric type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = val.Int()
+ case uint, uint8, uint16, uint32, uint64:
+ res = int64(val.Uint())
+ case float32, float64:
+ res = int64(val.Float())
+ case string:
+ if IsInt(val.String()) {
+ res, err = strconv.ParseInt(val.String(), 0, 64)
+ if err != nil {
+ res = 0
+ }
+ } else {
+ err = fmt.Errorf("ToInt: invalid numeric format %g", value)
+ res = 0
+ }
+ default:
+ err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+ return strconv.ParseBool(str)
+}
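
A quick illustrative check of the conversion semantics above (a sketch, not part of the vendored file):

```go
n, err := govalidator.ToInt("42")     // n == 42, err == nil
_, err2 := govalidator.ToInt("4.2")   // err2 != nil: "4.2" is not an integer, so the result is 0
f, _ := govalidator.ToFloat("4.2")    // f == 4.2
b, _ := govalidator.ToBoolean("true") // b == true
fmt.Println(n, f, b, err == nil, err2 != nil)
```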
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
new file mode 100644
index 000000000..55dce62dc
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/doc.go
@@ -0,0 +1,3 @@
+package govalidator
+
+// A package of validators and sanitizers for strings, structures and collections.
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 000000000..1da2336f4
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,47 @@
+package govalidator
+
+import (
+ "sort"
+ "strings"
+)
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+ return es
+}
+
+func (es Errors) Error() string {
+ var errs []string
+ for _, e := range es {
+ errs = append(errs, e.Error())
+ }
+ sort.Strings(errs)
+ return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+ Name string
+ Err error
+ CustomErrorMessageExists bool
+
+ // Validator indicates the name of the validator that failed
+ Validator string
+ Path []string
+}
+
+func (e Error) Error() string {
+ if e.CustomErrorMessageExists {
+ return e.Err.Error()
+ }
+
+ errName := e.Name
+ if len(e.Path) > 0 {
+ errName = strings.Join(append(e.Path, e.Name), ".")
+ }
+
+ return errName + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/go.mod b/vendor/github.com/asaskevich/govalidator/go.mod
new file mode 100644
index 000000000..42d5b1f63
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/go.mod
@@ -0,0 +1,3 @@
+module github.com/asaskevich/govalidator
+
+go 1.13
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 000000000..5041d9e86
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,100 @@
+package govalidator
+
+import (
+ "math"
+)
+
+// Abs returns the absolute value of the number.
+func Abs(value float64) float64 {
+ return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise.
+func Sign(value float64) float64 {
+ if value > 0 {
+ return 1
+ } else if value < 0 {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+ return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+ return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+ return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+ return value <= 0
+}
+
+// InRangeInt returns true if value lies between the left and right bounds (inclusive).
+func InRangeInt(value, left, right interface{}) bool {
+ value64, _ := ToInt(value)
+ left64, _ := ToInt(left)
+ right64, _ := ToInt(right)
+ if left64 > right64 {
+ left64, right64 = right64, left64
+ }
+ return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between the left and right bounds (inclusive).
+func InRangeFloat32(value, left, right float32) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between the left and right bounds (inclusive).
+func InRangeFloat64(value, left, right float64) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRange returns true if value lies between the left and right bounds; it is
+// generic over int, float32, float64 and string.
+// All three values must be of the same type.
+// Returns false if the value doesn't lie in the range, or if the values are
+// incompatible or not comparable.
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+ switch value.(type) {
+ case int:
+ intValue, _ := ToInt(value)
+ intLeft, _ := ToInt(left)
+ intRight, _ := ToInt(right)
+ return InRangeInt(intValue, intLeft, intRight)
+ case float32, float64:
+ floatValue, _ := ToFloat(value)
+ floatLeft, _ := ToFloat(left)
+ floatRight, _ := ToFloat(right)
+ return InRangeFloat64(floatValue, floatLeft, floatRight)
+ case string:
+ return value.(string) >= left.(string) && value.(string) <= right.(string)
+ default:
+ return false
+ }
+}
+
+// IsWhole returns true if value is a whole number.
+func IsWhole(value float64) bool {
+ return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole).
+func IsNatural(value float64) bool {
+ return IsWhole(value) && IsPositive(value)
+}
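
And the dispatch behavior of the generic `InRange` above, as a short sketch (illustrative only):

```go
govalidator.InRange(5, 1, 10)      // true:  dispatches to InRangeInt
govalidator.InRange(4.2, 1.0, 2.0) // false: dispatches to InRangeFloat64
govalidator.InRange("b", "a", "c") // true:  plain string comparison
govalidator.InRange(5, "a", "z")   // false: mixed types are not comparable
```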
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 000000000..bafc3765e
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,113 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+ Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
+ ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+ ISBN13 string = "^(?:[0-9]{13})$"
+ UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ Alpha string = "^[a-zA-Z]+$"
+ Alphanumeric string = "^[a-zA-Z0-9]+$"
+ Numeric string = "^[0-9]+$"
+ Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+ Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+ Hexadecimal string = "^[0-9a-fA-F]+$"
+ Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
+ ASCII string = "^[\x00-\x7F]+$"
+ Multibyte string = "[^\x00-\x7F]"
+ FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ PrintableASCII string = "^[\x20-\x7E]+$"
+ DataURI string = "^data:.+\\/(.+);base64$"
+ MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
+ Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
+ IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername string = `(\S+(:\S*)?@)`
+ URLPath string = `((\/|\?|#)[^\s]*)`
+ URLPort string = `(:(\d{1,5}))`
+ URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
+ URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
+ URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+ SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+ WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixPath string = `^(/[^/\x00]*)+/?$`
+ WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
+ Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
+ tagName string = "valid"
+ hasLowerCase string = ".*[[:lower:]]"
+ hasUpperCase string = ".*[[:upper:]]"
+ hasWhitespace string = ".*[[:space:]]"
+ hasWhitespaceOnly string = "^[[:space:]]+$"
+ IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
+ IMSI string = "^\\d{14,15}$"
+ E164 string = `^\+?[1-9]\d{1,14}$`
+)
+
+// Used by IsFilePath func
+const (
+ // Unknown is unresolved OS type
+ Unknown = iota
+ // Win is Windows type
+ Win
+ // Unix is *nix OS types
+ Unix
+)
+
+var (
+ userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
+ hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
+ userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
+ rxEmail = regexp.MustCompile(Email)
+ rxCreditCard = regexp.MustCompile(CreditCard)
+ rxISBN10 = regexp.MustCompile(ISBN10)
+ rxISBN13 = regexp.MustCompile(ISBN13)
+ rxUUID3 = regexp.MustCompile(UUID3)
+ rxUUID4 = regexp.MustCompile(UUID4)
+ rxUUID5 = regexp.MustCompile(UUID5)
+ rxUUID = regexp.MustCompile(UUID)
+ rxAlpha = regexp.MustCompile(Alpha)
+ rxAlphanumeric = regexp.MustCompile(Alphanumeric)
+ rxNumeric = regexp.MustCompile(Numeric)
+ rxInt = regexp.MustCompile(Int)
+ rxFloat = regexp.MustCompile(Float)
+ rxHexadecimal = regexp.MustCompile(Hexadecimal)
+ rxHexcolor = regexp.MustCompile(Hexcolor)
+ rxRGBcolor = regexp.MustCompile(RGBcolor)
+ rxASCII = regexp.MustCompile(ASCII)
+ rxPrintableASCII = regexp.MustCompile(PrintableASCII)
+ rxMultibyte = regexp.MustCompile(Multibyte)
+ rxFullWidth = regexp.MustCompile(FullWidth)
+ rxHalfWidth = regexp.MustCompile(HalfWidth)
+ rxBase64 = regexp.MustCompile(Base64)
+ rxDataURI = regexp.MustCompile(DataURI)
+ rxMagnetURI = regexp.MustCompile(MagnetURI)
+ rxLatitude = regexp.MustCompile(Latitude)
+ rxLongitude = regexp.MustCompile(Longitude)
+ rxDNSName = regexp.MustCompile(DNSName)
+ rxURL = regexp.MustCompile(URL)
+ rxSSN = regexp.MustCompile(SSN)
+ rxWinPath = regexp.MustCompile(WinPath)
+ rxUnixPath = regexp.MustCompile(UnixPath)
+ rxARWinPath = regexp.MustCompile(WinARPath)
+ rxARUnixPath = regexp.MustCompile(UnixARPath)
+ rxSemver = regexp.MustCompile(Semver)
+ rxHasLowerCase = regexp.MustCompile(hasLowerCase)
+ rxHasUpperCase = regexp.MustCompile(hasUpperCase)
+ rxHasWhitespace = regexp.MustCompile(hasWhitespace)
+ rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+ rxIMEI = regexp.MustCompile(IMEI)
+ rxIMSI = regexp.MustCompile(IMSI)
+ rxE164 = regexp.MustCompile(E164)
+)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 000000000..c573abb51
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,656 @@
+package govalidator
+
+import (
+ "reflect"
+ "regexp"
+ "sort"
+ "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+
+// InterfaceParamValidator is a wrapper for validator functions that accept an interface value plus additional string parameters
+type InterfaceParamValidator func(in interface{}, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+ var keys []string
+ for k := range t {
+ keys = append(keys, k)
+ }
+
+ sort.Slice(keys, func(a, b int) bool {
+ return t[keys[a]].order < t[keys[b]].order
+ })
+
+ return keys
+}
+
+type tagOption struct {
+ name string
+ customErrorMessage string
+ order int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions that accept an interface value plus additional parameters
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+ "type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+ "type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of validator functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+ "length": ByteLength,
+ "range": Range,
+ "runelength": RuneLength,
+ "stringlength": StringLength,
+ "matches": StringMatches,
+ "in": IsInRaw,
+ "rsapub": IsRsaPub,
+ "minstringlength": MinStringLength,
+ "maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "in": regexp.MustCompile(`^in\((.*)\)`),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+ "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+ "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
+ "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
+}
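+
+// In struct tags, these parameterized validators carry their arguments inline,
+// matching the regexes above, e.g. `valid:"length(2|10)"`, `valid:"in(red|green|blue)"`
+// or `valid:"matches(^[a-z]+$)"`.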
+
+type customTypeTagMap struct {
+ validators map[string]CustomTypeValidator
+
+ sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+ tm.RLock()
+ defer tm.RUnlock()
+ v, ok := tm.validators[name]
+ return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+ tm.Lock()
+ defer tm.Unlock()
+ tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
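+
+// A custom validator can be registered like this (a minimal sketch; the tag
+// name "customMinLength" is an arbitrary example):
+//
+// CustomTypeTagMap.Set("customMinLength", func(i interface{}, o interface{}) bool {
+// s, ok := i.(string)
+// return ok && len(s) >= 3
+// })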
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
+var TagMap = map[string]Validator{
+ "email": IsEmail,
+ "url": IsURL,
+ "dialstring": IsDialString,
+ "requrl": IsRequestURL,
+ "requri": IsRequestURI,
+ "alpha": IsAlpha,
+ "utfletter": IsUTFLetter,
+ "alphanum": IsAlphanumeric,
+ "utfletternum": IsUTFLetterNumeric,
+ "numeric": IsNumeric,
+ "utfnumeric": IsUTFNumeric,
+ "utfdigit": IsUTFDigit,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHexcolor,
+ "rgbcolor": IsRGBcolor,
+ "lowercase": IsLowerCase,
+ "uppercase": IsUpperCase,
+ "int": IsInt,
+ "float": IsFloat,
+ "null": IsNull,
+ "notnull": IsNotNull,
+ "uuid": IsUUID,
+ "uuidv3": IsUUIDv3,
+ "uuidv4": IsUUIDv4,
+ "uuidv5": IsUUIDv5,
+ "creditcard": IsCreditCard,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "json": IsJSON,
+ "multibyte": IsMultibyte,
+ "ascii": IsASCII,
+ "printableascii": IsPrintableASCII,
+ "fullwidth": IsFullWidth,
+ "halfwidth": IsHalfWidth,
+ "variablewidth": IsVariableWidth,
+ "base64": IsBase64,
+ "datauri": IsDataURI,
+ "ip": IsIP,
+ "port": IsPort,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "dns": IsDNSName,
+ "host": IsHost,
+ "mac": IsMAC,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "semver": IsSemver,
+ "rfc3339": IsRFC3339,
+ "rfc3339WithoutZone": IsRFC3339WithoutZone,
+ "ISO3166Alpha2": IsISO3166Alpha2,
+ "ISO3166Alpha3": IsISO3166Alpha3,
+ "ISO4217": IsISO4217,
+ "IMEI": IsIMEI,
+ "ulid": IsULID,
+}
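+
+// These tag names are what ValidateStruct matches against, e.g. (field names
+// here are arbitrary):
+//
+// type post struct {
+// Title    string `valid:"alphanum,required"`
+// AuthorIP string `valid:"ipv4"`
+// }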
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+ EnglishShortName string
+ FrenchShortName string
+ Alpha2Code string
+ Alpha3Code string
+ Numeric string
+}
+
+// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+ {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+ {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+ {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+ {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+ {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+ {"Angola", "Angola (l')", "AO", "AGO", "024"},
+ {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+ {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+ {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+ {"Australia", "Australie (l')", "AU", "AUS", "036"},
+ {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+ {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+ {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+ {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+ {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+ {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+ {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+ {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+ {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+ {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+ {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+ {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+ {"Bouvet Island", "Bouvet (l'ÃŽle)", "BV", "BVT", "074"},
+ {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+ {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+ {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+ {"Solomon Islands", "Salomon (ÃŽles)", "SB", "SLB", "090"},
+ {"Virgin Islands (British)", "Vierges britanniques (les ÃŽles)", "VG", "VGB", "092"},
+ {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+ {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+ {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+ {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+ {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+ {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+ {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+ {"Canada", "Canada (le)", "CA", "CAN", "124"},
+ {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+ {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+ {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+ {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+ {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+ {"Chile", "Chili (le)", "CL", "CHL", "152"},
+ {"China", "Chine (la)", "CN", "CHN", "156"},
+ {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+ {"Christmas Island", "Christmas (l'ÃŽle)", "CX", "CXR", "162"},
+ {"Cocos (Keeling) Islands (the)", "Cocos (les ÃŽles)/ Keeling (les ÃŽles)", "CC", "CCK", "166"},
+ {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+ {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+ {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+ {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+ {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+ {"Cook Islands (the)", "Cook (les ÃŽles)", "CK", "COK", "184"},
+ {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+ {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+ {"Cuba", "Cuba", "CU", "CUB", "192"},
+ {"Cyprus", "Chypre", "CY", "CYP", "196"},
+ {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+ {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+ {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+ {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+ {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+ {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+ {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+ {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+ {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+ {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+ {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+ {"Falkland Islands (the) [Malvinas]", "Falkland (les ÃŽles)/Malouines (les ÃŽles)", "FK", "FLK", "238"},
+ {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+ {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+ {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+ {"Ã…land Islands", "Ã…land(les ÃŽles)", "AX", "ALA", "248"},
+ {"France", "France (la)", "FR", "FRA", "250"},
+ {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+ {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+ {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+ {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+ {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+ {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+ {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+ {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+ {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+ {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+ {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+ {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+ {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+ {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+ {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+ {"Guam", "Guam", "GU", "GUM", "316"},
+ {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+ {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+ {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+ {"Haiti", "Haïti", "HT", "HTI", "332"},
+ {"Heard Island and McDonald Islands", "Heard-et-ÃŽles MacDonald (l'ÃŽle)", "HM", "HMD", "334"},
+ {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+ {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+ {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+ {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+ {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+ {"India", "Inde (l')", "IN", "IND", "356"},
+ {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+ {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+ {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+ {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+ {"Israel", "Israël", "IL", "ISR", "376"},
+ {"Italy", "Italie (l')", "IT", "ITA", "380"},
+ {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+ {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+ {"Japan", "Japon (le)", "JP", "JPN", "392"},
+ {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+ {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+ {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+ {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+ {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+ {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+ {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+ {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+ {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+ {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+ {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+ {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+ {"Libya", "Libye (la)", "LY", "LBY", "434"},
+ {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+ {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+ {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+ {"Macao", "Macao", "MO", "MAC", "446"},
+ {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+ {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+ {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+ {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+ {"Mali", "Mali (le)", "ML", "MLI", "466"},
+ {"Malta", "Malte", "MT", "MLT", "470"},
+ {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+ {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+ {"Mauritius", "Maurice", "MU", "MUS", "480"},
+ {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+ {"Monaco", "Monaco", "MC", "MCO", "492"},
+ {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+ {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+ {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+ {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+ {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+ {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+ {"Oman", "Oman", "OM", "OMN", "512"},
+ {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+ {"Nauru", "Nauru", "NR", "NRU", "520"},
+ {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+ {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+ {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+ {"Aruba", "Aruba", "AW", "ABW", "533"},
+ {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+ {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+ {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+ {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+ {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+ {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+ {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+ {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+ {"Niue", "Niue", "NU", "NIU", "570"},
+ {"Norfolk Island", "Norfolk (l'ÃŽle)", "NF", "NFK", "574"},
+ {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+ {"Northern Mariana Islands (the)", "Mariannes du Nord (les ÃŽles)", "MP", "MNP", "580"},
+ {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+ {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+ {"Marshall Islands (the)", "Marshall (ÃŽles)", "MH", "MHL", "584"},
+ {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+ {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+ {"Panama", "Panama (le)", "PA", "PAN", "591"},
+ {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+ {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+ {"Peru", "Pérou (le)", "PE", "PER", "604"},
+ {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+ {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+ {"Poland", "Pologne (la)", "PL", "POL", "616"},
+ {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+ {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+ {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+ {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+ {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+ {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+ {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+ {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+ {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+ {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+ {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+ {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+ {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+ {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+ {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+ {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+ {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+ {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+ {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+ {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+ {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+ {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+ {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+ {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+ {"Singapore", "Singapour", "SG", "SGP", "702"},
+ {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+ {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+ {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+ {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+ {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+ {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+ {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+ {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+ {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+ {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+ {"Svalbard and Jan Mayen", "Svalbard et l'ÃŽle Jan Mayen (le)", "SJ", "SJM", "744"},
+ {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+ {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+ {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+ {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+ {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+ {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+ {"Togo", "Togo (le)", "TG", "TGO", "768"},
+ {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+ {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+ {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+ {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+ {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+ {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+ {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+ {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+ {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+ {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+ {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+ {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+ {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+ {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+ {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+ {"Jersey", "Jersey", "JE", "JEY", "832"},
+ {"Isle of Man", "ÃŽle de Man", "IM", "IMN", "833"},
+ {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+ {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+ {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+ {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+ {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+ {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+ {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+ {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+ {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+ {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+ {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
+
+// ISO4217List is the list of ISO currency codes
+var ISO4217List = []string{
+ "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
+ "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
+ "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
+ "DJF", "DKK", "DOP", "DZD",
+ "EGP", "ERN", "ETB", "EUR",
+ "FJD", "FKP",
+ "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
+ "HKD", "HNL", "HRK", "HTG", "HUF",
+ "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
+ "JMD", "JOD", "JPY",
+ "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
+ "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
+ "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
+ "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
+ "OMR",
+ "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
+ "QAR",
+ "RON", "RSD", "RUB", "RWF",
+ "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
+ "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
+ "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
+ "VEF", "VES", "VND", "VUV",
+ "WST",
+ "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
+ "YER",
+ "ZAR", "ZMW", "ZWL",
+}
+
+// ISO693Entry stores ISO language codes
+type ISO693Entry struct {
+ Alpha3bCode string
+ Alpha2Code string
+ English string
+}
+
+// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
+var ISO693List = []ISO693Entry{
+ {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
+ {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
+ {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
+ {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
+ {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
+ {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
+ {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
+ {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
+ {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
+ {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
+ {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
+ {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
+ {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
+ {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
+ {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
+ {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
+ {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
+ {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
+ {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
+ {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
+ {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
+ {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
+ {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
+ {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
+ {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
+ {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
+ {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
+ {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
+ {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
+ {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
+ {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
+ {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
+ {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
+ {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
+ {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
+ {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
+ {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
+ {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
+ {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
+ {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
+ {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
+ {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
+ {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
+ {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
+ {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
+ {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
+ {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
+ {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
+ {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
+ {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
+ {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
+ {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
+ {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
+ {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
+ {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
+ {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
+ {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
+ {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
+ {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
+ {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
+ {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
+ {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
+ {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
+ {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
+ {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
+ {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
+ {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
+ {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
+ {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
+ {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
+ {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
+ {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
+ {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
+ {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
+ {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
+ {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
+ {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
+ {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
+ {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
+ {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
+ {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
+ {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
+ {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
+ {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
+ {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
+ {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
+ {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
+ {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
+ {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
+ {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
+ {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
+ {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
+ {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
+ {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
+ {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
+ {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
+ {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
+ {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
+ {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
+ {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
+ {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
+ {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
+ {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
+ {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
+ {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
+ {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
+ {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
+ {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
+ {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
+ {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
+ {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
+ {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
+ {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
+ {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
+ {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
+ {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
+ {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
+ {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
+ {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
+ {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
+ {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
+ {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
+ {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
+ {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
+ {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
+ {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
+ {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
+ {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
+ {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
+ {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
+ {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
+ {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
+ {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
+ {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
+ {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
+ {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
+ {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
+ {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
+ {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
+ {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
+ {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
+ {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
+ {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
+ {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
+ {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
+ {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
+ {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
+ {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
+ {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
+ {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
+ {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
+ {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
+ {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
+ {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
+ {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
+ {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
+ {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
+ {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
+ {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
+ {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
+ {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
+ {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
+ {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
+ {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
+ {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
+ {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
+ {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
+ {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
+ {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
+ {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
+ {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
+ {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
+ {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
+ {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
+ {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
+ {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
+ {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
+ {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
+ {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
+ {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
+ {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
+ {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
+ {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
+ {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
+}
diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 000000000..f4c30f824
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,270 @@
+package govalidator
+
+import (
+ "errors"
+ "fmt"
+ "html"
+ "math"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+ return strings.Contains(str, substring)
+}
+
+// Matches checks if the string matches the pattern (the pattern is a regular expression).
+// In case of error it returns false
+func Matches(str, pattern string) bool {
+ match, _ := regexp.MatchString(pattern, str)
+ return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If second argument is empty, it will remove leading spaces.
+func LeftTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimLeftFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("^[" + chars + "]+")
+ return r.ReplaceAllString(str, "")
+}
+
+// RightTrim trims characters from the right side of the input.
+// If second argument is empty, it will remove trailing spaces.
+func RightTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimRightFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("[" + chars + "]+$")
+ return r.ReplaceAllString(str, "")
+}
+
+// Trim trims characters from both sides of the input.
+// If second argument is empty, it will remove spaces.
+func Trim(str, chars string) string {
+ return LeftTrim(RightTrim(str, chars), chars)
+}
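+
+// For example, given the helpers above:
+//
+// Trim("  hello  ", "")  -> "hello"
+// Trim("xxhelloxx", "x") -> "hello"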
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+ pattern := "[^" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+ pattern := "[" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value below 32, plus the DEL character (127); these are mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+ chars := ""
+ if keepNewLines {
+ chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+ } else {
+ chars = "\x00-\x1F\x7F"
+ }
+ return BlackList(str, chars)
+}
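+
+// For example, StripLow("a\tb\nc", true) returns "ab\nc": the tab (0x09) is
+// stripped while the newline (0x0A) is preserved because keepNewLines is true.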
+
+// ReplacePattern replaces all matches of a regular expression pattern in the string
+func ReplacePattern(str, pattern, replace string) string {
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+ if len(segment) == 0 {
+ return inrune
+ }
+ if len(inrune) != 0 {
+ inrune = append(inrune, '_')
+ }
+ inrune = append(inrune, segment...)
+ return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+ return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+ var output []rune
+ var segment []rune
+ for _, r := range str {
+
+ // don't treat numbers as separate segments
+ if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+ output = addSegment(output, segment)
+ segment = nil
+ }
+ segment = append(segment, unicode.ToLower(r))
+ }
+ output = addSegment(output, segment)
+ return string(output)
+}
+
+// Reverse returns the reversed string
+func Reverse(s string) string {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines
+func GetLines(s string) []string {
+ return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string
+func GetLine(s string, index int) (string, error) {
+ lines := GetLines(s)
+ if index < 0 || index >= len(lines) {
+ return "", errors.New("line index out of bounds")
+ }
+ return lines[index], nil
+}
+
+// RemoveTags removes all tags from HTML string
+func RemoveTags(s string) string {
+ return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names
+func SafeFileName(str string) string {
+ name := strings.ToLower(str)
+ name = path.Clean(path.Base(name))
+ name = strings.Trim(name, " ")
+ separators, err := regexp.Compile(`[ &_=+:]`)
+ if err == nil {
+ name = separators.ReplaceAllString(name, "-")
+ }
+ legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+ if err == nil {
+ name = legal.ReplaceAllString(name, "")
+ }
+ for strings.Contains(name, "--") {
+ name = strings.Replace(name, "--", "-", -1)
+ }
+ return name
+}
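+
+// For example, SafeFileName("My File+Name.TXT") yields "my-file-name.txt":
+// the input is lowercased, separators become "-" and illegal characters are dropped.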
+
+// NormalizeEmail canonicalizes an email address.
+// Both the local part and the hostname are lowercased.
+// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part,
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all @googlemail.com addresses are
+// normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+ if !IsEmail(str) {
+ return "", fmt.Errorf("%s is not an email", str)
+ }
+ parts := strings.Split(str, "@")
+ parts[0] = strings.ToLower(parts[0])
+ parts[1] = strings.ToLower(parts[1])
+ if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+ parts[1] = "gmail.com"
+ parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+ }
+ return strings.Join(parts, "@"), nil
+}
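+
+// For example, NormalizeEmail("Some.One+tag@GoogleMail.com") returns
+// "someone@gmail.com" with a nil error.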
+
+// Truncate truncates a string at the word boundary closest to length and appends ending; strings no longer than length are returned unchanged.
+func Truncate(str string, length int, ending string) string {
+ var aftstr, befstr string
+ if len(str) > length {
+ words := strings.Fields(str)
+ before, present := 0, 0
+ for i := range words {
+ befstr = aftstr
+ before = present
+ aftstr = aftstr + words[i] + " "
+ present = len(aftstr)
+ if present > length && i != 0 {
+ if (length - before) < (present - length) {
+ return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ }
+ }
+
+ return str
+}
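+
+// For example, Truncate("The quick brown fox jumps over the lazy dog", 15, "...")
+// returns "The quick brown...": the cut lands on the word boundary closest to 15.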
+
+// PadLeft pads the left side of a string if its size is less than the indicated pad length
+func PadLeft(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of a string if its size is less than the indicated pad length
+func PadRight(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of a string if its size is less than the indicated pad length
+func PadBoth(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads a string on the left, the right or both sides.
+// Note that the padding string can be unicode and more than one character
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+ // When the padded length is less than the current string size
+ if padLen < utf8.RuneCountInString(str) {
+ return str
+ }
+
+ padLen -= utf8.RuneCountInString(str)
+
+ targetLen := padLen
+
+ targetLenLeft := targetLen
+ targetLenRight := targetLen
+ if padLeft && padRight {
+ targetLenLeft = padLen / 2
+ targetLenRight = padLen - targetLenLeft
+ }
+
+ strToRepeatLen := utf8.RuneCountInString(padStr)
+
+ repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+ repeatedString := strings.Repeat(padStr, repeatTimes)
+
+ leftSide := ""
+ if padLeft {
+ leftSide = repeatedString[0:targetLenLeft]
+ }
+
+ rightSide := ""
+ if padRight {
+ rightSide = repeatedString[0:targetLenRight]
+ }
+
+ return leftSide + str + rightSide
+}
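+
+// For example, given the helpers above:
+//
+// PadLeft("abc", "*", 5)  -> "**abc"
+// PadRight("abc", "*", 5) -> "abc**"
+// PadBoth("abc", "*", 6)  -> "*abc**" (the right side receives the odd rune)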
+
+// TruncatingErrorf calls fmt.Errorf with only as many args as there are %s verbs in str, dropping any extra args
+func TruncatingErrorf(str string, args ...interface{}) error {
+ n := strings.Count(str, "%s")
+ return fmt.Errorf(str, args[:n]...)
+}
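+
+// For example, TruncatingErrorf("%s failed", "op", "ignored") produces the error
+// "op failed"; the second argument is dropped because the format string contains
+// only one %s verb.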
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 000000000..c9c4fac06
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1768 @@
+// Package govalidator is package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ fieldsRequiredByDefault bool
+ nilPtrAllowedByRequired = false
+ notNumberRegexp = regexp.MustCompile("[^0-9]+")
+ whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
+ paramsRegexp = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+const rfc3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+// type exampleStruct struct {
+// Name string ``
+// Email string `valid:"email"`
+// }
+// This, however, will only fail when Email is empty or an invalid email address:
+// type exampleStruct2 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email"`
+// }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+// type exampleStruct3 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email,optional"`
+// }
+func SetFieldsRequiredByDefault(value bool) {
+ fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+// type exampleStruct struct {
+// Name *string `valid:"required"`
+// }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+ nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+ // TODO uppercase letters are not supported
+ return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email of existing domain
+func IsExistingEmail(email string) bool {
+
+ if len(email) < 6 || len(email) > 254 {
+ return false
+ }
+ at := strings.LastIndex(email, "@")
+ if at <= 0 || at > len(email)-3 {
+ return false
+ }
+ user := email[:at]
+ host := email[at+1:]
+ if len(user) > 64 {
+ return false
+ }
+ switch host {
+ case "localhost", "example.com":
+ return true
+ }
+ if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+ return false
+ }
+ if _, err := net.LookupMX(host); err != nil {
+ if _, err := net.LookupIP(host); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
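+
+// Note that IsExistingEmail performs live DNS lookups via net.LookupMX and
+// net.LookupIP, so its result depends on network availability at call time.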
+
+// IsURL checks if the string is a URL.
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ strTemp := str
+ if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+ // support URLs without an explicit scheme but with a colon for the port number
+ // http:// is prepended so url.Parse will succeed; strTemp is used so this does not affect rxURL.MatchString
+ strTemp = "http://" + str
+ }
+ u, err := url.Parse(strTemp)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+ url, err := url.ParseRequestURI(rawurl)
+ if err != nil {
+ return false //Couldn't even parse the rawurl
+ }
+ if len(url.Scheme) == 0 {
+ return false //No Scheme found
+ }
+ return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+ _, err := url.ParseRequestURI(rawurl)
+ return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+
+ for _, c := range str {
+ if !unicode.IsLetter(c) {
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ for _, c := range str {
+ if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also Fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsNumber(c) { //numbers && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsDigit(c) { //digits && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+ return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+ return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+ return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
+func HasLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least 1 uppercase. Empty string is valid.
+func HasUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+ return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+ f, _ := ToFloat(str)
+ p := int64(f)
+ q, _ := ToInt(num)
+ if q == 0 {
+ return false
+ }
+ return (p == 0) || (p%q == 0)
+}
+
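+// A hedged usage sketch of the semantics above (the sample values are
+// illustrative, not taken from upstream docs):
+//
+//	IsDivisibleBy("124", "2") // true:  124 % 2 == 0
+//	IsDivisibleBy("124", "0") // false: zero divisor
+//	IsDivisibleBy("abc", "2") // true:  the invalid string converts to zero
+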
+// IsNull checks if the string is null.
+func IsNull(str string) bool {
+ return len(str) == 0
+}
+
+// IsNotNull checks if the string is not null.
+func IsNotNull(str string) bool {
+ return !IsNull(str)
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace
+func HasWhitespaceOnly(str string) bool {
+ return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace
+func HasWhitespace(str string) bool {
+ return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+ return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+ return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+ return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+ return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+ return rxUUID.MatchString(str)
+}
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var ulidDec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// ulidEncodedSize is the length of a text-encoded ULID.
+const ulidEncodedSize = 26
+
+// IsULID checks if the string is a ULID.
+//
+// Implementation adapted from:
+// https://github.com/oklog/ulid (Apache-2.0 License)
+//
+func IsULID(str string) bool {
+ // Check if a base32 encoded ULID is the right length.
+ if len(str) != ulidEncodedSize {
+ return false
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if ulidDec[str[0]] == 0xFF ||
+ ulidDec[str[1]] == 0xFF ||
+ ulidDec[str[2]] == 0xFF ||
+ ulidDec[str[3]] == 0xFF ||
+ ulidDec[str[4]] == 0xFF ||
+ ulidDec[str[5]] == 0xFF ||
+ ulidDec[str[6]] == 0xFF ||
+ ulidDec[str[7]] == 0xFF ||
+ ulidDec[str[8]] == 0xFF ||
+ ulidDec[str[9]] == 0xFF ||
+ ulidDec[str[10]] == 0xFF ||
+ ulidDec[str[11]] == 0xFF ||
+ ulidDec[str[12]] == 0xFF ||
+ ulidDec[str[13]] == 0xFF ||
+ ulidDec[str[14]] == 0xFF ||
+ ulidDec[str[15]] == 0xFF ||
+ ulidDec[str[16]] == 0xFF ||
+ ulidDec[str[17]] == 0xFF ||
+ ulidDec[str[18]] == 0xFF ||
+ ulidDec[str[19]] == 0xFF ||
+ ulidDec[str[20]] == 0xFF ||
+ ulidDec[str[21]] == 0xFF ||
+ ulidDec[str[22]] == 0xFF ||
+ ulidDec[str[23]] == 0xFF ||
+ ulidDec[str[24]] == 0xFF ||
+ ulidDec[str[25]] == 0xFF {
+ return false
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if str[0] > '7' {
+ return false
+ }
+ return true
+}
+
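+// A hedged usage sketch (the sample ULIDs are illustrative):
+//
+//	IsULID("01G65Z755AFWAKHE12NY0CQ9FH") // true
+//	IsULID("01g65z755afwakhe12ny0cq9fh") // true: the lookup table also accepts lower case
+//	IsULID("81G65Z755AFWAKHE12NY0CQ9FH") // false: a first character above '7' overflows 128 bits
+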
+// IsCreditCard checks if the string is a credit card.
+func IsCreditCard(str string) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ if !rxCreditCard.MatchString(sanitized) {
+ return false
+ }
+
+ number, _ := ToInt(sanitized)
+ number, lastDigit := number / 10, number % 10
+
+ var sum int64
+ for i := 0; number > 0; i++ {
+ digit := number % 10
+
+ if i%2 == 0 {
+ digit *= 2
+ if digit > 9 {
+ digit -= 9
+ }
+ }
+
+ sum += digit
+ number = number / 10
+ }
+
+ return (sum+lastDigit)%10 == 0
+}
+
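+// A hedged usage sketch using the well-known Visa test number (assuming
+// rxCreditCard accepts the 4xxx prefix):
+//
+//	IsCreditCard("4111 1111 1111 1111") // true: matches the pattern and the Luhn checksum
+//	IsCreditCard("4111-1111-1111-1112") // false: separators are stripped, but the Luhn sum is off by one
+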
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+ return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+ return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is neither 10 nor 13, both variants will be checked.
+func IsISBN(str string, version int) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ var checksum int32
+ var i int32
+ if version == 10 {
+ if !rxISBN10.MatchString(sanitized) {
+ return false
+ }
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(sanitized[i]-'0')
+ }
+ if sanitized[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(sanitized[9]-'0')
+ }
+ if checksum%11 == 0 {
+ return true
+ }
+ return false
+ } else if version == 13 {
+ if !rxISBN13.MatchString(sanitized) {
+ return false
+ }
+ factor := []int32{1, 3}
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(sanitized[i]-'0')
+ }
+ return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+ }
+ return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
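+// A hedged usage sketch (the sample ISBNs are the usual documentation examples):
+//
+//	IsISBN10("3-16-148410-X")     // true: position-weighted sum is divisible by 11
+//	IsISBN13("978-3-16-148410-0") // true: alternating 1/3 weights reproduce the check digit
+//	IsISBN("3-16-148410-X", 0)    // true: any version other than 10 or 13 tries both
+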
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+ var js json.RawMessage
+ return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+ return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+ if rxWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false, Win
+ }
+ return true, Win
+ } else if rxUnixPath.MatchString(str) {
+ return true, Unix
+ }
+ return false, Unknown
+}
+
+// IsWinFilePath checks both relative & absolute paths in Windows
+func IsWinFilePath(str string) bool {
+ if rxARWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false
+ }
+ return true
+ }
+ return false
+}
+
+// IsUnixFilePath checks both relative & absolute paths in Unix
+func IsUnixFilePath(str string) bool {
+ return rxARUnixPath.MatchString(str)
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image
+func IsDataURI(str string) bool {
+ dataURI := strings.Split(str, ",")
+ if len(dataURI) < 2 || !rxDataURI.MatchString(dataURI[0]) {
+ return false // no payload after the comma, or a malformed header
+ }
+ return IsBase64(dataURI[1])
+}
+
+// IsMagnetURI checks if a string is a valid magnet URI
+func IsMagnetURI(str string) bool {
+ return rxMagnetURI.MatchString(str)
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code
+func IsISO3166Alpha2(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code
+func IsISO3166Alpha3(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha3Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha2 checks if a string is a valid two-letter language code
+func IsISO693Alpha2(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha3b checks if a string is a valid three-letter language code
+func IsISO693Alpha3b(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha3bCode {
+ return true
+ }
+ }
+ return false
+}
+
+// IsDNSName will validate the given string as a DNS name
+func IsDNSName(str string) bool {
+ if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+ // constraints already violated
+ return false
+ }
+ return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of type algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'sha3-224', 'sha3-256', 'sha3-384', 'sha3-512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+ var hexLength string // renamed from len to avoid shadowing the builtin
+ algo := strings.ToLower(algorithm)
+
+ switch algo {
+ case "crc32", "crc32b":
+ hexLength = "8"
+ case "md5", "md4", "ripemd128", "tiger128":
+ hexLength = "32"
+ case "sha1", "ripemd160", "tiger160":
+ hexLength = "40"
+ case "tiger192":
+ hexLength = "48"
+ case "sha3-224":
+ hexLength = "56"
+ case "sha256", "sha3-256":
+ hexLength = "64"
+ case "sha384", "sha3-384":
+ hexLength = "96"
+ case "sha512", "sha3-512":
+ hexLength = "128"
+ default:
+ return false
+ }
+
+ return Matches(str, "^[a-f0-9]{"+hexLength+"}$")
+}
+
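+// A hedged usage sketch; note the pattern is lower-case hex only:
+//
+//	IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5")  // true: 32 lower-case hex chars
+//	IsHash("D41D8CD98F00B204E9800998ECF8427E", "md5")  // false: upper-case hex is rejected
+//	IsHash("d41d8cd98f00b204e9800998ecf8427e", "sha1") // false: wrong length for sha1
+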
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
+func IsSHA3224(str string) bool {
+ return IsHash(str, "sha3-224")
+}
+
+// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
+func IsSHA3256(str string) bool {
+ return IsHash(str, "sha3-256")
+}
+
+// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
+func IsSHA3384(str string) bool {
+ return IsHash(str, "sha3-384")
+}
+
+// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
+func IsSHA3512(str string) bool {
+ return IsHash(str, "sha3-512")
+}
+
+// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
+func IsSHA512(str string) bool {
+ return IsHash(str, "sha512")
+}
+
+// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`
+func IsSHA384(str string) bool {
+ return IsHash(str, "sha384")
+}
+
+// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`
+func IsSHA256(str string) bool {
+ return IsHash(str, "sha256")
+}
+
+// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`
+func IsTiger192(str string) bool {
+ return IsHash(str, "tiger192")
+}
+
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
+func IsTiger160(str string) bool {
+ return IsHash(str, "tiger160")
+}
+
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
+func IsRipeMD160(str string) bool {
+ return IsHash(str, "ripemd160")
+}
+
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
+func IsSHA1(str string) bool {
+ return IsHash(str, "sha1")
+}
+
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
+func IsTiger128(str string) bool {
+ return IsHash(str, "tiger128")
+}
+
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
+func IsRipeMD128(str string) bool {
+ return IsHash(str, "ripemd128")
+}
+
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
+func IsCRC32(str string) bool {
+ return IsHash(str, "crc32")
+}
+
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
+func IsCRC32b(str string) bool {
+ return IsHash(str, "crc32b")
+}
+
+// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`
+func IsMD5(str string) bool {
+ return IsHash(str, "md5")
+}
+
+// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`
+func IsMD4(str string) bool {
+ return IsHash(str, "md4")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+ if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+ return true
+ }
+
+ return false
+}
+
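+// A hedged usage sketch (the hosts are placeholders):
+//
+//	IsDialString("example.com:80")    // true: DNS name plus numeric port
+//	IsDialString("example.com")       // false: a port is required
+//	IsDialString("[2001:db8::1]:443") // true: bracketed IPv6 host, as net.SplitHostPort expects
+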
+// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
+func IsIP(str string) bool {
+ return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+ if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+ return true
+ }
+ return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ":")
+}
+
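+// A hedged sketch of an edge case worth knowing: both checks key off the
+// separator characters, so an IPv4-mapped IPv6 address satisfies both.
+//
+//	IsIPv4("10.0.0.1")        // true
+//	IsIPv6("10.0.0.1")        // false: no ':' present
+//	IsIPv4("::ffff:10.0.0.1") // true: contains '.'
+//	IsIPv6("::ffff:10.0.0.1") // true: contains ':'
+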
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func IsCIDR(str string) bool {
+ _, _, err := net.ParseCIDR(str)
+ return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+ _, err := net.ParseMAC(str)
+ return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+ return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+ return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+ return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+ return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is a valid IMEI
+func IsIMEI(str string) bool {
+ return rxIMEI.MatchString(str)
+}
+
+// IsIMSI checks if a string is a valid IMSI
+func IsIMSI(str string) bool {
+ if !rxIMSI.MatchString(str) {
+ return false
+ }
+
+ mcc, err := strconv.ParseInt(str[0:3], 10, 32)
+ if err != nil {
+ return false
+ }
+
+ switch mcc {
+ case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
+ case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
+ case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
+ case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
+ case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
+ case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
+ case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
+ case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
+ case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
+ case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
+ case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
+ case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
+ case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
+ case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
+ case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
+ case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
+ case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
+ case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
+ case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
+ case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
+ case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
+ case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
+ case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
+ case 738, 740, 742, 744, 746, 748, 750, 995:
+ return true
+ default:
+ return false
+ }
+ return true
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length in bits
+func IsRsaPublicKey(str string, keylen int) bool {
+ bb := bytes.NewBufferString(str)
+ pemBytes, err := ioutil.ReadAll(bb)
+ if err != nil {
+ return false
+ }
+ block, _ := pem.Decode(pemBytes)
+ if block != nil && block.Type != "PUBLIC KEY" {
+ return false
+ }
+ var der []byte
+
+ if block != nil {
+ der = block.Bytes
+ } else {
+ der, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return false
+ }
+ }
+
+ key, err := x509.ParsePKIXPublicKey(der)
+ if err != nil {
+ return false
+ }
+ pubkey, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return false
+ }
+ bitlen := len(pubkey.N.Bytes()) * 8
+ return bitlen == keylen
+}
+
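+// A hedged sketch of the accepted inputs: either a PEM block whose type is
+// exactly "PUBLIC KEY", or a bare base64 string of PKIX DER (the key material
+// below is elided, not real):
+//
+//	pemKey := "-----BEGIN PUBLIC KEY-----\n...base64 PKIX DER...\n-----END PUBLIC KEY-----"
+//	IsRsaPublicKey(pemKey, 2048) // true only for an RSA key with a 2048-bit modulus
+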
+// IsRegex checks if a given string is a valid regex with RE2 syntax
+func IsRegex(str string) bool {
+ _, err := regexp.Compile(str)
+ return err == nil
+}
+
+func toJSONName(tag string) string {
+ if tag == "" {
+ return ""
+ }
+
+ // JSON name always comes first. If there's no options then split[0] is
+ // JSON name, if JSON name is not set, then split[0] is an empty string.
+ split := strings.SplitN(tag, ",", 2)
+
+ name := split[0]
+
+ // However, it is possible that the field is skipped when
+ // (de-)serializing from/to JSON, in which case assume that there is no
+ // tag name to use
+ if name == "-" {
+ return ""
+ }
+ return name
+}
+
+func prependPathToErrors(err error, path string) error {
+ switch err2 := err.(type) {
+ case Error:
+ err2.Path = append([]string{path}, err2.Path...)
+ return err2
+ case Errors:
+ errors := err2.Errors()
+ for i, err3 := range errors {
+ errors[i] = prependPathToErrors(err3, path)
+ }
+ return err2
+ }
+ return err
+}
+
+// ValidateArray performs validation according to condition iterator that validates every element of the array
+func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
+ return Every(array, iterator)
+}
+
+// ValidateMap uses a validation map for fields.
+// result will be equal to `false` if there are any errors.
+// s is the map containing the data to be validated.
+// m is the validation map in the form:
+// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
+func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ var errs Errors
+ var index int
+ val := reflect.ValueOf(s)
+ for key, value := range s {
+ presentResult := true
+ validator, ok := m[key]
+ if !ok {
+ presentResult = false
+ var err error
+ err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key)
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ valueField := reflect.ValueOf(value)
+ mapResult := true
+ typeResult := true
+ structResult := true
+ resultField := true
+ switch subValidator := validator.(type) {
+ case map[string]interface{}:
+ var err error
+ if v, ok := value.(map[string]interface{}); !ok {
+ mapResult = false
+ err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ } else {
+ mapResult, err = ValidateMap(v, subValidator)
+ if err != nil {
+ mapResult = false
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ case string:
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ subValidator != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err = typeCheck(valueField, reflect.StructField{
+ Name: key,
+ PkgPath: "",
+ Type: val.Type(),
+ Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
+ Offset: 0,
+ Index: []int{index},
+ Anonymous: false,
+ }, val, nil)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case nil:
+ // already handled when checked above
+ default:
+ typeResult = false
+ err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ result = result && presentResult && typeResult && resultField && structResult && mapResult
+ index++
+ }
+ // checks required keys
+ requiredResult := true
+ for key, value := range m {
+ if schema, ok := value.(string); ok {
+ tags := parseTagIntoMap(schema)
+ if required, ok := tags["required"]; ok {
+ if _, ok := s[key]; !ok {
+ requiredResult = false
+ if required.customErrorMessage != "" {
+ err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
+ } else {
+ err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
+ }
+ errs = append(errs, err)
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result && requiredResult, err
+}
+
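+// A hedged usage sketch (the field names are illustrative; "required" and
+// "alpha" are standard tag validators in this package):
+//
+//	template := map[string]interface{}{
+//		"name":    "required,alpha",
+//		"address": map[string]interface{}{"line1": "required"},
+//	}
+//	data := map[string]interface{}{
+//		"name":    "John",
+//		"address": map[string]interface{}{"line1": "Oak Street"},
+//	}
+//	ok, err := ValidateMap(data, template) // ok == true, err == nil
+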
+// ValidateStruct uses tags for fields.
+// result will be equal to `false` if there are any errors.
+// TODO: currently there is no guarantee that errors will be returned in a predictable order (tests may fail)
+func ValidateStruct(s interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ val := reflect.ValueOf(s)
+ if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ // we only accept structs
+ if val.Kind() != reflect.Struct {
+ return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+ }
+ var errs Errors
+ for i := 0; i < val.NumField(); i++ {
+ valueField := val.Field(i)
+ typeField := val.Type().Field(i)
+ if typeField.PkgPath != "" {
+ continue // Private field
+ }
+ structResult := true
+ if valueField.Kind() == reflect.Interface {
+ valueField = valueField.Elem()
+ }
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ typeField.Tag.Get(tagName) != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, typeField.Name)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err2 := typeCheck(valueField, typeField, val, nil)
+ if err2 != nil {
+
+ // Replace structure name with JSON name if there is a tag on the variable
+ jsonTag := toJSONName(typeField.Tag.Get("json"))
+ if jsonTag != "" {
+ switch jsonError := err2.(type) {
+ case Error:
+ jsonError.Name = jsonTag
+ err2 = jsonError
+ case Errors:
+ for i2, err3 := range jsonError {
+ switch customErr := err3.(type) {
+ case Error:
+ customErr.Name = jsonTag
+ jsonError[i2] = customErr
+ }
+ }
+
+ err2 = jsonError
+ }
+ }
+
+ errs = append(errs, err2)
+ }
+ result = result && resultField && structResult
+ }
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result, err
+}
+
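+// A hedged usage sketch (the struct and tags are illustrative; "required",
+// "alphanum", and "ipv4" are standard tag validators):
+//
+//	type post struct {
+//		Title    string `valid:"alphanum,required"`
+//		AuthorIP string `valid:"ipv4"`
+//	}
+//	ok, err := ValidateStruct(post{Title: "Title123", AuthorIP: "10.0.0.1"})
+//	// ok == true, err == nil; a bad AuthorIP would surface as an Error named "AuthorIP"
+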
+// ValidateStructAsync performs async validation of the struct and returns results through the channels
+func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
+ res := make(chan bool)
+ errors := make(chan error)
+
+ go func() {
+ defer close(res)
+ defer close(errors)
+
+ isValid, isFailed := ValidateStruct(s)
+
+ res <- isValid
+ errors <- isFailed
+ }()
+
+ return res, errors
+}
+
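+// A hedged usage note: both channels are unbuffered and the goroutine sends
+// the result before the error, so receive in that order to avoid a deadlock.
+//
+//	resChan, errChan := ValidateStructAsync(somePost)
+//	ok := <-resChan  // must be drained first
+//	err := <-errChan // then the error value
+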
+// ValidateMapAsync performs async validation of the map and returns results through the channels
+func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
+ res := make(chan bool)
+ errors := make(chan error)
+
+ go func() {
+ defer close(res)
+ defer close(errors)
+
+ isValid, isFailed := ValidateMap(s, m)
+
+ res <- isValid
+ errors <- isFailed
+ }()
+
+ return res, errors
+}
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+ optionsMap := make(tagOptionsMap)
+ options := strings.Split(tag, ",")
+
+ for i, option := range options {
+ option = strings.TrimSpace(option)
+
+ validationOptions := strings.Split(option, "~")
+ if !isValidTag(validationOptions[0]) {
+ continue
+ }
+ if len(validationOptions) == 2 {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+ } else {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+ }
+ }
+ return optionsMap
+}
+
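+// A hedged sketch of the parse result for the tag quoted above:
+//
+//	opts := parseTagIntoMap("required~Some error message,length(2|3)")
+//	// opts["required"].customErrorMessage == "Some error message"
+//	// opts["length(2|3)"].customErrorMessage == ""
+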
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// IsSSN will validate the given string as a U.S. Social Security Number
+func IsSSN(str string) bool {
+ if str == "" || len(str) != 11 {
+ return false
+ }
+ return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if the string is a valid semantic version
+func IsSemver(str string) bool {
+ return rxSemver.MatchString(str)
+}
+
+// IsType checks if interface is of some type
+func IsType(v interface{}, params ...string) bool {
+ if len(params) == 1 {
+ typ := params[0]
+ return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1)
+ }
+ return false
+}
+
+// IsTime checks if string is valid according to given format
+func IsTime(str string, format string) bool {
+ _, err := time.Parse(format, str)
+ return err == nil
+}
+
+// IsUnixTime checks if a string is a valid unix timestamp value
+func IsUnixTime(str string) bool {
+ _, err := strconv.Atoi(str)
+ return err == nil
+}
+
+// IsRFC3339 checks if the string is a valid timestamp according to RFC3339
+func IsRFC3339(str string) bool {
+ return IsTime(str, time.RFC3339)
+}
+
+// IsRFC3339WithoutZone checks if the string is a valid RFC3339 timestamp with the timezone part omitted.
+func IsRFC3339WithoutZone(str string) bool {
+ return IsTime(str, rfc3339WithoutZone)
+}
+
+// IsISO4217 checks if the string is a valid ISO 4217 currency code
+func IsISO4217(str string) bool {
+ for _, currency := range ISO4217List {
+ if str == currency {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ByteLength checks if the string's length (in bytes) is within the given range
+func ByteLength(str string, params ...string) bool {
+ if len(params) == 2 {
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return len(str) >= int(min) && len(str) <= int(max)
+ }
+
+ return false
+}
+
+// RuneLength checks string's length
+// Alias for StringLength
+func RuneLength(str string, params ...string) bool {
+ return StringLength(str, params...)
+}
+
+// IsRsaPub checks whether a string is a valid RSA public key
+// Alias for IsRsaPublicKey
+func IsRsaPub(str string, params ...string) bool {
+ if len(params) == 1 {
+ keylen, _ := ToInt(params[0]) // avoid shadowing the len builtin
+ return IsRsaPublicKey(str, int(keylen))
+ }
+
+ return false
+}
+
+// StringMatches checks if a string matches a given pattern.
+func StringMatches(s string, params ...string) bool {
+ if len(params) == 1 {
+ pattern := params[0]
+ return Matches(s, pattern)
+ }
+ return false
+}
+
+// StringLength checks the string's length (counting multi-byte characters as single runes)
+func StringLength(str string, params ...string) bool {
+ if len(params) == 2 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return strLength >= int(min) && strLength <= int(max)
+ }
+
+ return false
+}
+
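+// A hedged comparison with ByteLength above, using a three-rune string that
+// occupies nine bytes:
+//
+//	StringLength("日本語", "3", "3") // true:  three runes
+//	ByteLength("日本語", "3", "3")   // false: nine bytes
+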
+// MinStringLength checks the string's minimum length (counting multi-byte characters as single runes)
+func MinStringLength(str string, params ...string) bool {
+ if len(params) == 1 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ return strLength >= int(min)
+ }
+
+ return false
+}
+
+// MaxStringLength checks the string's maximum length (counting multi-byte characters as single runes)
+func MaxStringLength(str string, params ...string) bool {
+ if len(params) == 1 {
+ strLength := utf8.RuneCountInString(str)
+ max, _ := ToInt(params[0])
+ return strLength <= int(max)
+ }
+
+ return false
+}
+
+// Range checks if the string, converted to a number, falls within a given range
+func Range(str string, params ...string) bool {
+ if len(params) == 2 {
+ value, _ := ToFloat(str)
+ min, _ := ToFloat(params[0])
+ max, _ := ToFloat(params[1])
+ return InRange(value, min, max)
+ }
+
+ return false
+}
+
+// IsInRaw checks if a string is in a pipe-separated list of allowed values
+func IsInRaw(str string, params ...string) bool {
+ if len(params) == 1 {
+ rawParams := params[0]
+
+ parsedParams := strings.Split(rawParams, "|")
+
+ return IsIn(str, parsedParams...)
+ }
+
+ return false
+}
+
+// IsIn checks if string str is a member of the set of strings params
+func IsIn(str string, params ...string) bool {
+ for _, param := range params {
+ if str == param {
+ return true
+ }
+ }
+
+ return false
+}
+
+func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
+ if nilPtrAllowedByRequired {
+ k := v.Kind()
+ if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
+ return true, nil
+ }
+ }
+
+ if requiredOption, isRequired := options["required"]; isRequired {
+ if len(requiredOption.customErrorMessage) > 0 {
+ return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
+ } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
+ return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
+ }
+ // not required and empty is valid
+ return true, nil
+}
+
+func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
+ if !v.IsValid() {
+ return false, nil
+ }
+
+ tag := t.Tag.Get(tagName)
+
+ // checks if the field should be ignored
+ switch tag {
+ case "":
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
+ if !fieldsRequiredByDefault {
+ return true, nil
+ }
+ return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
+ }
+ case "-":
+ return true, nil
+ }
+
+ isRootType := false
+ if options == nil {
+ isRootType = true
+ options = parseTagIntoMap(tag)
+ }
+
+ if isEmptyValue(v) {
+ // an empty value is not validated, checks only required
+ isValid, resultErr = checkRequired(v, t, options)
+ for key := range options {
+ delete(options, key)
+ }
+ return isValid, resultErr
+ }
+
+ var customTypeErrors Errors
+ optionsOrder := options.orderedKeys()
+ for _, validatorName := range optionsOrder {
+ validatorStruct := options[validatorName]
+ if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
+ delete(options, validatorName)
+
+ if result := validatefunc(v.Interface(), o.Interface()); !result {
+ if len(validatorStruct.customErrorMessage) > 0 {
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
+ continue
+ }
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
+ }
+ }
+ }
+
+ if len(customTypeErrors.Errors()) > 0 {
+ return false, customTypeErrors
+ }
+
+ if isRootType {
+ // Ensure that we've checked the value with all specified validators before reporting that it is valid
+ defer func() {
+ delete(options, "optional")
+ delete(options, "required")
+
+ if isValid && resultErr == nil && len(options) != 0 {
+ optionsOrder := options.orderedKeys()
+ for _, validator := range optionsOrder {
+ isValid = false
+ resultErr = Error{t.Name, fmt.Errorf(
+ "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
+ return
+ }
+ }
+ }()
+ }
+
+ for _, validatorSpec := range optionsOrder {
+ validatorStruct := options[validatorSpec]
+ var negate bool
+ validator := validatorSpec
+ customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+ // checks whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = validator[1:]
+ negate = true
+ }
+
+ // checks for interface param validators
+ for key, value := range InterfaceParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) == 0 {
+ continue
+ }
+
+ validatefunc, ok := InterfaceParamTagMap[key]
+ if !ok {
+ continue
+ }
+
+ delete(options, validatorSpec)
+
+ field := fmt.Sprint(v)
+ if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ // for each tag option checks the map of validator functions
+ for _, validatorSpec := range optionsOrder {
+ validatorStruct := options[validatorSpec]
+ var negate bool
+ validator := validatorSpec
+ customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+ // checks whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = validator[1:]
+ negate = true
+ }
+
+ // checks for param validators
+ for key, value := range ParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) == 0 {
+ continue
+ }
+
+ validatefunc, ok := ParamTagMap[key]
+ if !ok {
+ continue
+ }
+
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // type not yet supported, fail
+ return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
+ }
+ }
+
+ if validatefunc, ok := TagMap[validator]; ok {
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field); !result && !negate || result && negate {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // not yet supported types (fail here!)
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+ return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+ }
+ }
+ }
+ return true, nil
+ case reflect.Map:
+ if v.Type().Key().Kind() != reflect.String {
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+ var sv stringValues
+ sv = v.MapKeys()
+ sort.Sort(sv)
+ result := true
+ for i, k := range sv {
+ var resultItem bool
+ var err error
+ if v.MapIndex(k).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+ if err != nil {
+ err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Slice, reflect.Array:
+ result := true
+ for i := 0; i < v.Len(); i++ {
+ var resultItem bool
+ var err error
+ if v.Index(i).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.Index(i), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.Index(i).Interface())
+ if err != nil {
+ err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Interface:
+ // If the value is an interface, then validate its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return ValidateStruct(v.Interface())
+ case reflect.Ptr:
+ // If the value is a pointer, then check its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return typeCheck(v.Elem(), t, o, options)
+ case reflect.Struct:
+ return true, nil
+ default:
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+}
+
+func stripParams(validatorString string) string {
+ return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+// isEmptyValue checks whether the value is empty or not
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String, reflect.Array:
+ return v.Len() == 0
+ case reflect.Map, reflect.Slice:
+ return v.Len() == 0 || v.IsNil()
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for the specified field of the struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or this field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+ if e == nil {
+ return ""
+ }
+ return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns map of errors of the struct validated
+// by ValidateStruct or empty map if there are no errors.
+func ErrorsByField(e error) map[string]string {
+ m := make(map[string]string)
+ if e == nil {
+ return m
+ }
+ // prototype for ValidateStruct
+
+ switch e := e.(type) {
+ case Error:
+ m[e.Name] = e.Err.Error()
+ case Errors:
+ for _, item := range e.Errors() {
+ n := ErrorsByField(item)
+ for k, v := range n {
+ m[k] = v
+ }
+ }
+ }
+
+ return m
+}
+
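+// A hedged usage sketch, reusing the post type sketched above ValidateStruct
+// (the message text follows the "%s does not validate as %s" format used by
+// typeCheck, so treat the exact wording as an assumption):
+//
+//	_, err := ValidateStruct(post{Title: "Title123", AuthorIP: "not-an-ip"})
+//	msgs := ErrorsByField(err)
+//	// msgs["AuthorIP"] == "not-an-ip does not validate as ipv4"
+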
+// Error returns the string representation of an UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string {
+ return "validator: unsupported type: " + e.Type.String()
+}
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// IsE164 checks if the string is a valid E.164 formatted phone number
+func IsE164(str string) bool {
+ return rxE164.MatchString(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml
new file mode 100644
index 000000000..bc5f7b086
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/wercker.yml
@@ -0,0 +1,15 @@
+box: golang
+build:
+ steps:
+ - setup-go-workspace
+
+ - script:
+ name: go get
+ code: |
+ go version
+ go get -t ./...
+
+ - script:
+ name: go test
+ code: |
+ go test -race -v ./...
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
deleted file mode 100644
index 339177be6..000000000
--- a/vendor/github.com/beorn7/perks/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d7..000000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
-8
-5
-26
-12
-5
-235
-13
-6
-28
-30
-3
-3
-3
-3
-5
-2
-33
-7
-2
-4
-7
-12
-14
-5
-8
-3
-10
-4
-5
-3
-6
-6
-209
-20
-3
-10
-14
-3
-4
-6
-8
-5
-11
-7
-3
-2
-3
-3
-212
-5
-222
-4
-10
-10
-5
-6
-3
-8
-3
-10
-254
-220
-2
-3
-5
-24
-5
-4
-222
-7
-3
-3
-223
-8
-15
-12
-14
-14
-3
-2
-2
-3
-13
-3
-11
-4
-4
-6
-5
-7
-13
-5
-3
-5
-2
-5
-3
-5
-2
-7
-15
-17
-14
-3
-6
-6
-3
-17
-5
-4
-7
-6
-4
-4
-8
-6
-8
-3
-9
-3
-6
-3
-4
-5
-3
-3
-660
-4
-6
-10
-3
-6
-3
-2
-5
-13
-2
-4
-4
-10
-4
-8
-4
-3
-7
-9
-9
-3
-10
-37
-3
-13
-4
-12
-3
-6
-10
-8
-5
-21
-2
-3
-8
-3
-2
-3
-3
-4
-12
-2
-4
-8
-8
-4
-3
-2
-20
-1
-6
-32
-2
-11
-6
-18
-3
-8
-11
-3
-212
-3
-4
-2
-6
-7
-12
-11
-3
-2
-16
-10
-6
-4
-6
-3
-2
-7
-3
-2
-2
-2
-2
-5
-6
-4
-3
-10
-3
-4
-6
-5
-3
-4
-4
-5
-6
-4
-3
-4
-4
-5
-7
-5
-5
-3
-2
-7
-2
-4
-12
-4
-5
-6
-2
-4
-4
-8
-4
-15
-13
-7
-16
-5
-3
-23
-5
-5
-7
-3
-2
-9
-8
-7
-5
-8
-11
-4
-10
-76
-4
-47
-4
-3
-2
-7
-4
-2
-3
-37
-10
-4
-2
-20
-5
-4
-4
-10
-10
-4
-3
-7
-23
-240
-7
-13
-5
-5
-3
-3
-2
-5
-4
-2
-8
-7
-19
-2
-23
-8
-7
-2
-5
-3
-8
-3
-8
-13
-5
-5
-5
-2
-3
-23
-4
-9
-8
-4
-3
-3
-5
-220
-2
-3
-4
-6
-14
-3
-53
-6
-2
-5
-18
-6
-3
-219
-6
-5
-2
-5
-3
-6
-5
-15
-4
-3
-17
-3
-2
-4
-7
-2
-3
-3
-4
-4
-3
-2
-664
-6
-3
-23
-5
-5
-16
-5
-8
-2
-4
-2
-24
-12
-3
-2
-3
-5
-8
-3
-5
-4
-3
-14
-3
-5
-8
-2
-3
-7
-9
-4
-2
-3
-6
-8
-4
-3
-4
-6
-5
-3
-3
-6
-3
-19
-4
-4
-6
-3
-6
-3
-5
-22
-5
-4
-4
-3
-8
-11
-4
-9
-7
-6
-13
-4
-4
-4
-6
-17
-9
-3
-3
-3
-4
-3
-221
-5
-11
-3
-4
-2
-12
-6
-3
-5
-7
-5
-7
-4
-9
-7
-14
-37
-19
-217
-16
-3
-5
-2
-2
-7
-19
-7
-6
-7
-4
-24
-5
-11
-4
-7
-7
-9
-13
-3
-4
-3
-6
-28
-4
-4
-5
-5
-2
-5
-6
-4
-4
-6
-10
-5
-4
-3
-2
-3
-3
-6
-5
-5
-4
-3
-2
-3
-7
-4
-6
-18
-16
-8
-16
-4
-5
-8
-6
-9
-13
-1545
-6
-215
-6
-5
-6
-3
-45
-31
-5
-2
-2
-4
-3
-3
-2
-5
-4
-3
-5
-7
-7
-4
-5
-8
-5
-4
-749
-2
-31
-9
-11
-2
-11
-5
-4
-4
-7
-9
-11
-4
-5
-4
-7
-3
-4
-6
-2
-15
-3
-4
-3
-4
-3
-5
-2
-13
-5
-5
-3
-3
-23
-4
-4
-5
-7
-4
-13
-2
-4
-3
-4
-2
-6
-2
-7
-3
-5
-5
-3
-29
-5
-4
-4
-3
-10
-2
-3
-79
-16
-6
-6
-7
-7
-3
-5
-5
-7
-4
-3
-7
-9
-5
-6
-5
-9
-6
-3
-6
-4
-17
-2
-10
-9
-3
-6
-2
-3
-21
-22
-5
-11
-4
-2
-17
-2
-224
-2
-14
-3
-4
-4
-2
-4
-4
-4
-4
-5
-3
-4
-4
-10
-2
-6
-3
-3
-5
-7
-2
-7
-5
-6
-3
-218
-2
-2
-5
-2
-6
-3
-5
-222
-14
-6
-33
-3
-2
-5
-3
-3
-3
-9
-5
-3
-3
-2
-7
-4
-3
-4
-3
-5
-6
-5
-26
-4
-13
-9
-7
-3
-221
-3
-3
-4
-4
-4
-4
-2
-18
-5
-3
-7
-9
-6
-8
-3
-10
-3
-11
-9
-5
-4
-17
-5
-5
-6
-6
-3
-2
-4
-12
-17
-6
-7
-218
-4
-2
-4
-10
-3
-5
-15
-3
-9
-4
-3
-3
-6
-29
-3
-3
-4
-5
-5
-3
-8
-5
-6
-6
-7
-5
-3
-5
-3
-29
-2
-31
-5
-15
-24
-16
-5
-207
-4
-3
-3
-2
-15
-4
-4
-13
-5
-5
-4
-6
-10
-2
-7
-8
-4
-6
-20
-5
-3
-4
-3
-12
-12
-5
-17
-7
-3
-3
-3
-6
-10
-3
-5
-25
-80
-4
-9
-3
-2
-11
-3
-3
-2
-3
-8
-7
-5
-5
-19
-5
-3
-3
-12
-11
-2
-6
-5
-5
-5
-3
-3
-3
-4
-209
-14
-3
-2
-5
-19
-4
-4
-3
-4
-14
-5
-6
-4
-13
-9
-7
-4
-7
-10
-2
-9
-5
-7
-2
-8
-4
-6
-5
-5
-222
-8
-7
-12
-5
-216
-3
-4
-4
-6
-3
-14
-8
-7
-13
-4
-3
-3
-3
-3
-17
-5
-4
-3
-33
-6
-6
-33
-7
-5
-3
-8
-7
-5
-2
-9
-4
-2
-233
-24
-7
-4
-8
-10
-3
-4
-15
-2
-16
-3
-3
-13
-12
-7
-5
-4
-207
-4
-2
-4
-27
-15
-2
-5
-2
-25
-6
-5
-5
-6
-13
-6
-18
-6
-4
-12
-225
-10
-7
-5
-2
-2
-11
-4
-14
-21
-8
-10
-3
-5
-4
-232
-2
-5
-5
-3
-7
-17
-11
-6
-6
-23
-4
-6
-3
-5
-4
-2
-17
-3
-6
-5
-8
-3
-2
-2
-14
-9
-4
-4
-2
-5
-5
-3
-7
-6
-12
-6
-10
-3
-6
-2
-2
-19
-5
-4
-4
-9
-2
-4
-13
-3
-5
-6
-3
-6
-5
-4
-9
-6
-3
-5
-7
-3
-6
-6
-4
-3
-10
-6
-3
-221
-3
-5
-3
-6
-4
-8
-5
-3
-6
-4
-4
-2
-54
-5
-6
-11
-3
-3
-4
-4
-4
-3
-7
-3
-11
-11
-7
-10
-6
-13
-223
-213
-15
-231
-7
-3
-7
-228
-2
-3
-4
-4
-5
-6
-7
-4
-13
-3
-4
-5
-3
-6
-4
-6
-7
-2
-4
-3
-4
-3
-3
-6
-3
-7
-3
-5
-18
-5
-6
-8
-10
-3
-3
-3
-2
-4
-2
-4
-4
-5
-6
-6
-4
-10
-13
-3
-12
-5
-12
-16
-8
-4
-19
-11
-2
-4
-5
-6
-8
-5
-6
-4
-18
-10
-4
-2
-216
-6
-6
-6
-2
-4
-12
-8
-3
-11
-5
-6
-14
-5
-3
-13
-4
-5
-4
-5
-3
-28
-6
-3
-7
-219
-3
-9
-7
-3
-10
-6
-3
-4
-19
-5
-7
-11
-6
-15
-19
-4
-13
-11
-3
-7
-5
-10
-2
-8
-11
-2
-6
-4
-6
-24
-6
-3
-3
-3
-3
-6
-18
-4
-11
-4
-2
-5
-10
-8
-3
-9
-5
-3
-4
-5
-6
-2
-5
-7
-4
-4
-14
-6
-4
-4
-5
-5
-7
-2
-4
-3
-7
-3
-3
-6
-4
-5
-4
-4
-4
-3
-3
-3
-3
-8
-14
-2
-3
-5
-3
-2
-4
-5
-3
-7
-3
-3
-18
-3
-4
-4
-5
-7
-3
-3
-3
-13
-5
-4
-8
-211
-5
-5
-3
-5
-2
-5
-4
-2
-655
-6
-3
-5
-11
-2
-5
-3
-12
-9
-15
-11
-5
-12
-217
-2
-6
-17
-3
-3
-207
-5
-5
-4
-5
-9
-3
-2
-8
-5
-4
-3
-2
-5
-12
-4
-14
-5
-4
-2
-13
-5
-8
-4
-225
-4
-3
-4
-5
-4
-3
-3
-6
-23
-9
-2
-6
-7
-233
-4
-4
-6
-18
-3
-4
-6
-3
-4
-4
-2
-3
-7
-4
-13
-227
-4
-3
-5
-4
-2
-12
-9
-17
-3
-7
-14
-6
-4
-5
-21
-4
-8
-9
-2
-9
-25
-16
-3
-6
-4
-7
-8
-5
-2
-3
-5
-4
-3
-3
-5
-3
-3
-3
-2
-3
-19
-2
-4
-3
-4
-2
-3
-4
-4
-2
-4
-3
-3
-3
-2
-6
-3
-17
-5
-6
-4
-3
-13
-5
-3
-3
-3
-4
-9
-4
-2
-14
-12
-4
-5
-24
-4
-3
-37
-12
-11
-21
-3
-4
-3
-13
-4
-2
-3
-15
-4
-11
-4
-4
-3
-8
-3
-4
-4
-12
-8
-5
-3
-3
-4
-2
-220
-3
-5
-223
-3
-3
-3
-10
-3
-15
-4
-241
-9
-7
-3
-6
-6
-23
-4
-13
-7
-3
-4
-7
-4
-9
-3
-3
-4
-10
-5
-5
-1
-5
-24
-2
-4
-5
-5
-6
-14
-3
-8
-2
-3
-5
-13
-13
-3
-5
-2
-3
-15
-3
-4
-2
-10
-4
-4
-4
-5
-5
-3
-5
-3
-4
-7
-4
-27
-3
-6
-4
-15
-3
-5
-6
-6
-5
-4
-8
-3
-9
-2
-6
-3
-4
-3
-7
-4
-18
-3
-11
-3
-3
-8
-9
-7
-24
-3
-219
-7
-10
-4
-5
-9
-12
-2
-5
-4
-4
-4
-3
-3
-19
-5
-8
-16
-8
-6
-22
-3
-23
-3
-242
-9
-4
-3
-3
-5
-7
-3
-3
-5
-8
-3
-7
-5
-14
-8
-10
-3
-4
-3
-7
-4
-6
-7
-4
-10
-4
-3
-11
-3
-7
-10
-3
-13
-6
-8
-12
-10
-5
-7
-9
-3
-4
-7
-7
-10
-8
-30
-9
-19
-4
-3
-19
-15
-4
-13
-3
-215
-223
-4
-7
-4
-8
-17
-16
-3
-7
-6
-5
-5
-4
-12
-3
-7
-4
-4
-13
-4
-5
-2
-5
-6
-5
-6
-6
-7
-10
-18
-23
-9
-3
-3
-6
-5
-2
-4
-2
-7
-3
-3
-2
-5
-5
-14
-10
-224
-6
-3
-4
-3
-7
-5
-9
-3
-6
-4
-2
-5
-11
-4
-3
-3
-2
-8
-4
-7
-4
-10
-7
-3
-3
-18
-18
-17
-3
-3
-3
-4
-5
-3
-3
-4
-12
-7
-3
-11
-13
-5
-4
-7
-13
-5
-4
-11
-3
-12
-3
-6
-4
-4
-21
-4
-6
-9
-5
-3
-10
-8
-4
-6
-4
-4
-6
-5
-4
-8
-6
-4
-6
-4
-4
-5
-9
-6
-3
-4
-2
-9
-3
-18
-2
-4
-3
-13
-3
-6
-6
-8
-7
-9
-3
-2
-16
-3
-4
-6
-3
-2
-33
-22
-14
-4
-9
-12
-4
-5
-6
-3
-23
-9
-4
-3
-5
-5
-3
-4
-5
-3
-5
-3
-10
-4
-5
-5
-8
-4
-4
-6
-8
-5
-4
-3
-4
-6
-3
-3
-3
-5
-9
-12
-6
-5
-9
-3
-5
-3
-2
-2
-2
-18
-3
-2
-21
-2
-5
-4
-6
-4
-5
-10
-3
-9
-3
-2
-10
-7
-3
-6
-6
-4
-4
-8
-12
-7
-3
-7
-3
-3
-9
-3
-4
-5
-4
-4
-5
-5
-10
-15
-4
-4
-14
-6
-227
-3
-14
-5
-216
-22
-5
-4
-2
-2
-6
-3
-4
-2
-9
-9
-4
-3
-28
-13
-11
-4
-5
-3
-3
-2
-3
-3
-5
-3
-4
-3
-5
-23
-26
-3
-4
-5
-6
-4
-6
-3
-5
-5
-3
-4
-3
-2
-2
-2
-7
-14
-3
-6
-7
-17
-2
-2
-15
-14
-16
-4
-6
-7
-13
-6
-4
-5
-6
-16
-3
-3
-28
-3
-6
-15
-3
-9
-2
-4
-6
-3
-3
-22
-4
-12
-6
-7
-2
-5
-4
-10
-3
-16
-6
-9
-2
-5
-12
-7
-5
-5
-5
-5
-2
-11
-9
-17
-4
-3
-11
-7
-3
-5
-15
-4
-3
-4
-211
-8
-7
-5
-4
-7
-6
-7
-6
-3
-6
-5
-6
-5
-3
-4
-4
-26
-4
-6
-10
-4
-4
-3
-2
-3
-3
-4
-5
-9
-3
-9
-4
-4
-5
-5
-8
-2
-4
-2
-3
-8
-4
-11
-19
-5
-8
-6
-3
-5
-6
-12
-3
-2
-4
-16
-12
-3
-4
-4
-8
-6
-5
-6
-6
-219
-8
-222
-6
-16
-3
-13
-19
-5
-4
-3
-11
-6
-10
-4
-7
-7
-12
-5
-3
-3
-5
-6
-10
-3
-8
-2
-5
-4
-7
-2
-4
-4
-2
-12
-9
-6
-4
-2
-40
-2
-4
-10
-4
-223
-4
-2
-20
-6
-7
-24
-5
-4
-5
-2
-20
-16
-6
-5
-13
-2
-3
-3
-19
-3
-2
-4
-5
-6
-7
-11
-12
-5
-6
-7
-7
-3
-5
-3
-5
-3
-14
-3
-4
-4
-2
-11
-1
-7
-3
-9
-6
-11
-12
-5
-8
-6
-221
-4
-2
-12
-4
-3
-15
-4
-5
-226
-7
-218
-7
-5
-4
-5
-18
-4
-5
-9
-4
-4
-2
-9
-18
-18
-9
-5
-6
-6
-3
-3
-7
-3
-5
-4
-4
-4
-12
-3
-6
-31
-5
-4
-7
-3
-6
-5
-6
-5
-11
-2
-2
-11
-11
-6
-7
-5
-8
-7
-10
-5
-23
-7
-4
-3
-5
-34
-2
-5
-23
-7
-3
-6
-8
-4
-4
-4
-2
-5
-3
-8
-5
-4
-8
-25
-2
-3
-17
-8
-3
-4
-8
-7
-3
-15
-6
-5
-7
-21
-9
-5
-6
-6
-5
-3
-2
-3
-10
-3
-6
-3
-14
-7
-4
-4
-8
-7
-8
-2
-6
-12
-4
-213
-6
-5
-21
-8
-2
-5
-23
-3
-11
-2
-3
-6
-25
-2
-3
-6
-7
-6
-6
-4
-4
-6
-3
-17
-9
-7
-6
-4
-3
-10
-7
-2
-3
-3
-3
-11
-8
-3
-7
-6
-4
-14
-36
-3
-4
-3
-3
-22
-13
-21
-4
-2
-7
-4
-4
-17
-15
-3
-7
-11
-2
-4
-7
-6
-209
-6
-3
-2
-2
-24
-4
-9
-4
-3
-3
-3
-29
-2
-2
-4
-3
-3
-5
-4
-6
-3
-3
-2
-4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index d7d14f8eb..000000000
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
- "math"
- "sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
- Value float64 `json:",string"`
- Width float64 `json:",string"`
- Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(Æ’)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(Æ’)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targetMap map[float64]float64) *Stream {
- // Convert map to slice to avoid slow iterations on a map.
- // Æ’ is called on the hot path, so converting the map to a slice
- // beforehand results in significant CPU savings.
- targets := targetMapToSlice(targetMap)
-
- Æ’ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for _, t := range targets {
- if t.quantile*s.n <= r {
- f = (2 * t.epsilon * r) / t.quantile
- } else {
- f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
-	return newStream(ƒ)
-}
-
-type target struct {
- quantile float64
- epsilon float64
-}
-
-func targetMapToSlice(targetMap map[float64]float64) []target {
- targets := make([]target, 0, len(targetMap))
-
- for quantile, epsilon := range targetMap {
- t := target{
- quantile: quantile,
- epsilon: epsilon,
- }
- targets = append(targets, t)
- }
-
- return targets
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
-	x := &stream{ƒ: ƒ}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentile value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(math.Ceil(float64(l) * q))
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying stream's samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list, reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
-	ƒ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
-	math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
-	t += math.Ceil(s.ƒ(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
-	if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:])
- s.l = s.l[:len(s.l)-1]
- xi -= 1
- } else {
- x = c
- xi = i
- }
- r -= c.Width
- }
-}
-
-func (s *stream) samples() Samples {
- samples := make(Samples, len(s.l))
- copy(samples, s.l)
- return samples
-}
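
Reviewer note: the file dropped above is the biased-quantile estimator used by Prometheus client summaries. For context while reviewing the removal, here is a minimal sketch of the API being deleted, built only from the doc comments above (the target quantiles and synthetic data are illustrative):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// NewTargeted trades memory for accuracy only at the quantiles we care
	// about; the map pairs each target quantile with its allowed absolute
	// error, per the doc comment above.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})

	for i := 0; i < 10000; i++ {
		s.Insert(rand.NormFloat64())
	}

	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
	fmt.Println("count:", s.Count())
}
```

Note that Merge is documented above as broken, so merging per-shard streams should be avoided even though the method exists.
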
diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml
deleted file mode 100644
index 102fb9a69..000000000
--- a/vendor/github.com/blang/semver/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-language: go
-matrix:
- include:
- - go: 1.4.3
- - go: 1.5.4
- - go: 1.6.3
- - go: 1.7
- - go: tip
- allow_failures:
- - go: tip
-install:
-- go get golang.org/x/tools/cmd/cover
-- go get github.com/mattn/goveralls
-script:
-- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
- -repotoken $COVERALLS_TOKEN
-- echo "Build examples" ; cd examples && go build
-- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
-env:
- global:
- secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE
deleted file mode 100644
index 5ba5c86fc..000000000
--- a/vendor/github.com/blang/semver/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License
-
-Copyright (c) 2014 Benedikt Lang
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
deleted file mode 100644
index 08b2e4a3d..000000000
--- a/vendor/github.com/blang/semver/README.md
+++ /dev/null
@@ -1,194 +0,0 @@
-semver for golang [Build Status](https://travis-ci.org/blang/semver) [GoDoc](https://godoc.org/github.com/blang/semver) [Coverage Status](https://coveralls.io/r/blang/semver?branch=master)
-======
-
-semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
-
-Usage
------
-```bash
-$ go get github.com/blang/semver
-```
-Note: Always vendor your dependencies or pin to a specific version tag.
-
-```go
-import "github.com/blang/semver"
-v1, err := semver.Make("1.0.0-beta")
-v2, err := semver.Make("2.0.0-beta")
-v1.Compare(v2)
-```
-
-Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
-
-Why should I use this lib?
------
-
-- Fully spec compatible
-- No reflection
-- No regex
-- Fully tested (Coverage >99%)
-- Readable parsing/validation errors
-- Fast (See [Benchmarks](#benchmarks))
-- Only Stdlib
-- Uses values instead of pointers
-- Many features, see below
-
-
-Features
------
-
-- Parsing and validation at all levels
-- Comparator-like comparisons
-- Compare Helper Methods
-- InPlace manipulation
-- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
-- Wildcards `>=1.x`, `<=2.5.x`
-- Sortable (implements sort.Interface)
-- database/sql compatible (sql.Scanner/Valuer)
-- encoding/json compatible (json.Marshaler/Unmarshaler)
-
-Ranges
-------
-
-A `Range` is a set of conditions which specify which versions satisfy the range.
-
-A condition is composed of an operator and a version. The supported operators are:
-
-- `<1.0.0` Less than `1.0.0`
-- `<=1.0.0` Less than or equal to `1.0.0`
-- `>1.0.0` Greater than `1.0.0`
-- `>=1.0.0` Greater than or equal to `1.0.0`
-- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
-- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
-
-Note that spaces between the operator and the version will be gracefully tolerated.
-
-A `Range` can combine multiple conditions, separated by spaces:
-
-Ranges can be linked by logical AND:
-
- - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
-
-Ranges can also be linked by logical OR:
-
- - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
-
-AND has a higher precedence than OR. It's not possible to use brackets.
-
-Ranges can be combined by both AND and OR:
-
- - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-
-Range usage:
-
-```go
-v, err := semver.Parse("1.2.3")
-validRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
-if validRange(v) {
-	// valid
-}
-
-```
-
-Example
------
-
-Have a look at full examples in [examples/main.go](examples/main.go)
-
-```go
-import "github.com/blang/semver"
-
-v, err := semver.Make("0.0.1-alpha.preview+123.github")
-fmt.Printf("Major: %d\n", v.Major)
-fmt.Printf("Minor: %d\n", v.Minor)
-fmt.Printf("Patch: %d\n", v.Patch)
-fmt.Printf("Pre: %s\n", v.Pre)
-fmt.Printf("Build: %s\n", v.Build)
-
-// Prerelease versions array
-if len(v.Pre) > 0 {
- fmt.Println("Prerelease versions:")
- for i, pre := range v.Pre {
- fmt.Printf("%d: %q\n", i, pre)
- }
-}
-
-// Build meta data array
-if len(v.Build) > 0 {
- fmt.Println("Build meta data:")
- for i, build := range v.Build {
- fmt.Printf("%d: %q\n", i, build)
- }
-}
-
-v001, err := semver.Make("0.0.1")
-// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
-v001.GT(v) == true
-v.LT(v001) == true
-v.GTE(v) == true
-v.LTE(v) == true
-
-// Or use v.Compare(v2) for comparisons (-1, 0, 1):
-v001.Compare(v) == 1
-v.Compare(v001) == -1
-v.Compare(v) == 0
-
-// Manipulate Version in place:
-v.Pre[0], err = semver.NewPRVersion("beta")
-if err != nil {
- fmt.Printf("Error parsing pre release version: %q", err)
-}
-
-fmt.Println("\nValidate versions:")
-v.Build[0] = "?"
-
-err = v.Validate()
-if err != nil {
- fmt.Printf("Validation failed: %s\n", err)
-}
-```
-
-
-Benchmarks
------
-
- BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
- BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
- BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
- BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
- BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
- BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
- BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
- BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
- BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
- BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
- BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
- BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
- BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
-
-See benchmark cases at [semver_test.go](semver_test.go)
-
-
-Motivation
------
-
-I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
-
-
-Contribution
------
-
-Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
-
-
-License
------
-
-See [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
deleted file mode 100644
index a74bf7c44..000000000
--- a/vendor/github.com/blang/semver/json.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package semver
-
-import (
- "encoding/json"
-)
-
-// MarshalJSON implements the encoding/json.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(data []byte) (err error) {
- var versionString string
-
- if err = json.Unmarshal(data, &versionString); err != nil {
- return
- }
-
- *v, err = Parse(versionString)
-
- return
-}
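
The MarshalJSON/UnmarshalJSON pair deleted above round-trips a Version through a single JSON string, validating via Parse on the way in. A small sketch (the `release` struct is illustrative, not part of the library):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

// release is a hypothetical consumer type embedding a Version field.
type release struct {
	Name    string         `json:"name"`
	Version semver.Version `json:"version"`
}

func main() {
	// Version marshals to a plain JSON string...
	out, err := json.Marshal(release{Name: "demo", Version: semver.MustParse("1.2.3")})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"demo","version":"1.2.3"}

	// ...and unmarshals back through Parse, so malformed versions fail loudly.
	var r release
	if err := json.Unmarshal([]byte(`{"name":"demo","version":"2.0.0-rc.1"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Version) // 2.0.0-rc.1
}
```
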
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
deleted file mode 100644
index 1cf8ebdd9..000000000
--- a/vendor/github.com/blang/semver/package.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "author": "blang",
- "bugs": {
- "URL": "https://github.com/blang/semver/issues",
- "url": "https://github.com/blang/semver/issues"
- },
- "gx": {
- "dvcsimport": "github.com/blang/semver"
- },
- "gxVersion": "0.10.0",
- "language": "go",
- "license": "MIT",
- "name": "semver",
- "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
- "version": "3.5.1"
-}
-
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
deleted file mode 100644
index fca406d47..000000000
--- a/vendor/github.com/blang/semver/range.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package semver
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode"
-)
-
-type wildcardType int
-
-const (
- noneWildcard wildcardType = iota
- majorWildcard wildcardType = 1
- minorWildcard wildcardType = 2
- patchWildcard wildcardType = 3
-)
-
-func wildcardTypefromInt(i int) wildcardType {
- switch i {
- case 1:
- return majorWildcard
- case 2:
- return minorWildcard
- case 3:
- return patchWildcard
- default:
- return noneWildcard
- }
-}
-
-type comparator func(Version, Version) bool
-
-var (
- compEQ comparator = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 0
- }
- compNE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) != 0
- }
- compGT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 1
- }
- compGE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) >= 0
- }
- compLT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == -1
- }
- compLE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) <= 0
- }
-)
-
-type versionRange struct {
- v Version
- c comparator
-}
-
-// rangeFunc creates a Range from the given versionRange.
-func (vr *versionRange) rangeFunc() Range {
- return Range(func(v Version) bool {
- return vr.c(v, vr.v)
- })
-}
-
-// Range represents a range of versions.
-// A Range can be used to check if a Version satisfies it:
-//
-//     validRange, err := semver.ParseRange(">1.0.0 <2.0.0")
-//     validRange(semver.MustParse("1.1.1")) // returns true
-type Range func(Version) bool
-
-// OR combines the existing Range with another Range using logical OR.
-func (rf Range) OR(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) || f(v)
- })
-}
-
-// AND combines the existing Range with another Range using logical AND.
-func (rf Range) AND(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) && f(v)
- })
-}
-
-// ParseRange parses a range and returns a Range.
-// If the range could not be parsed an error is returned.
-//
-// Valid ranges are:
-// - "<1.0.0"
-// - "<=1.0.0"
-// - ">1.0.0"
-// - ">=1.0.0"
-// - "1.0.0", "=1.0.0", "==1.0.0"
-// - "!1.0.0", "!=1.0.0"
-//
-// A Range can consist of multiple ranges separated by space:
-// Ranges can be linked by logical AND:
-// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
-// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
-//
-// Ranges can also be linked by logical OR:
-// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
-//
-// AND has a higher precedence than OR. It's not possible to use brackets.
-//
-// Ranges can be combined by both AND and OR
-//
-// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-func ParseRange(s string) (Range, error) {
- parts := splitAndTrim(s)
- orParts, err := splitORParts(parts)
- if err != nil {
- return nil, err
- }
- expandedParts, err := expandWildcardVersion(orParts)
- if err != nil {
- return nil, err
- }
- var orFn Range
- for _, p := range expandedParts {
- var andFn Range
- for _, ap := range p {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
- vr, err := buildVersionRange(opStr, vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
- }
- rf := vr.rangeFunc()
-
- // Set function
- if andFn == nil {
- andFn = rf
- } else { // Combine with existing function
- andFn = andFn.AND(rf)
- }
- }
- if orFn == nil {
- orFn = andFn
- } else {
- orFn = orFn.OR(andFn)
- }
-
- }
- return orFn, nil
-}
-
-// splitORParts splits the already cleaned parts by '||'.
-// Checks for invalid positions of the operator and returns an
-// error if found.
-func splitORParts(parts []string) ([][]string, error) {
- var ORparts [][]string
- last := 0
- for i, p := range parts {
- if p == "||" {
- if i == 0 {
- return nil, fmt.Errorf("First element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:i])
- last = i + 1
- }
- }
- if last == len(parts) {
- return nil, fmt.Errorf("Last element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:])
- return ORparts, nil
-}
-
-// buildVersionRange takes an operator and a version string and builds a
-// versionRange, or returns an error if either cannot be parsed.
-func buildVersionRange(opStr, vStr string) (*versionRange, error) {
- c := parseComparator(opStr)
- if c == nil {
- return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
- }
- v, err := Parse(vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
- }
-
- return &versionRange{
- v: v,
- c: c,
- }, nil
-
-}
-
-// inArray checks if a byte is contained in an array of bytes
-func inArray(s byte, list []byte) bool {
- for _, el := range list {
- if el == s {
- return true
- }
- }
- return false
-}
-
-// splitAndTrim splits a range string by spaces and trims whitespace
-func splitAndTrim(s string) (result []string) {
- last := 0
- var lastChar byte
- excludeFromSplit := []byte{'>', '<', '='}
- for i := 0; i < len(s); i++ {
- if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
- if last < i-1 {
- result = append(result, s[last:i])
- }
- last = i + 1
- } else if s[i] != ' ' {
- lastChar = s[i]
- }
- }
- if last < len(s)-1 {
- result = append(result, s[last:])
- }
-
- for i, v := range result {
- result[i] = strings.Replace(v, " ", "", -1)
- }
-
- // parts := strings.Split(s, " ")
- // for _, x := range parts {
- // if s := strings.TrimSpace(x); len(s) != 0 {
- // result = append(result, s)
- // }
- // }
- return
-}
-
-// splitComparatorVersion splits the comparator from the version.
-// Input must be free of leading or trailing spaces.
-func splitComparatorVersion(s string) (string, string, error) {
- i := strings.IndexFunc(s, unicode.IsDigit)
- if i == -1 {
- return "", "", fmt.Errorf("Could not get version from string: %q", s)
- }
- return strings.TrimSpace(s[0:i]), s[i:], nil
-}
-
-// getWildcardType will return the type of wildcard that the
-// passed version contains
-func getWildcardType(vStr string) wildcardType {
- parts := strings.Split(vStr, ".")
- nparts := len(parts)
- wildcard := parts[nparts-1]
-
- possibleWildcardType := wildcardTypefromInt(nparts)
- if wildcard == "x" {
- return possibleWildcardType
- }
-
- return noneWildcard
-}
-
-// createVersionFromWildcard will convert a wildcard version
-// into a regular version, replacing 'x's with '0's, handling
-// special cases like '1.x.x' and '1.x'
-func createVersionFromWildcard(vStr string) string {
- // handle 1.x.x
- vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
- vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
- parts := strings.Split(vStr2, ".")
-
- // handle 1.x
- if len(parts) == 2 {
- return vStr2 + ".0"
- }
-
- return vStr2
-}
-
-// incrementMajorVersion will increment the major version
-// of the passed version
-func incrementMajorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[0])
- if err != nil {
- return "", err
- }
- parts[0] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// incrementMinorVersion will increment the minor version
-// of the passed version
-func incrementMinorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- return "", err
- }
- parts[1] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// expandWildcardVersion will expand wildcards inside versions
-// following these rules:
-//
-// * when dealing with patch wildcards:
-// >= 1.2.x will become >= 1.2.0
-// <= 1.2.x will become < 1.3.0
-// > 1.2.x will become >= 1.3.0
-// < 1.2.x will become < 1.2.0
-// != 1.2.x will become < 1.2.0 >= 1.3.0
-//
-// * when dealing with minor wildcards:
-// >= 1.x will become >= 1.0.0
-// <= 1.x will become < 2.0.0
-// > 1.x will become >= 2.0.0
-// < 1.x will become < 1.0.0
-// != 1.x will become < 1.0.0 >= 2.0.0
-//
-// * when dealing with wildcards without
-// version operator:
-// 1.2.x will become >= 1.2.0 < 1.3.0
-// 1.x will become >= 1.0.0 < 2.0.0
-func expandWildcardVersion(parts [][]string) ([][]string, error) {
- var expandedParts [][]string
- for _, p := range parts {
- var newParts []string
- for _, ap := range p {
- if strings.Index(ap, "x") != -1 {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
-
- versionWildcardType := getWildcardType(vStr)
- flatVersion := createVersionFromWildcard(vStr)
-
- var resultOperator string
- var shouldIncrementVersion bool
- switch opStr {
- case ">":
- resultOperator = ">="
- shouldIncrementVersion = true
- case ">=":
- resultOperator = ">="
- case "<":
- resultOperator = "<"
- case "<=":
- resultOperator = "<"
- shouldIncrementVersion = true
- case "", "=", "==":
- newParts = append(newParts, ">="+flatVersion)
- resultOperator = "<"
- shouldIncrementVersion = true
- case "!=", "!":
- newParts = append(newParts, "<"+flatVersion)
- resultOperator = ">="
- shouldIncrementVersion = true
- }
-
- var resultVersion string
- if shouldIncrementVersion {
- switch versionWildcardType {
- case patchWildcard:
- resultVersion, _ = incrementMinorVersion(flatVersion)
- case minorWildcard:
- resultVersion, _ = incrementMajorVersion(flatVersion)
- }
- } else {
- resultVersion = flatVersion
- }
-
- ap = resultOperator + resultVersion
- }
- newParts = append(newParts, ap)
- }
- expandedParts = append(expandedParts, newParts)
- }
-
- return expandedParts, nil
-}
-
-func parseComparator(s string) comparator {
- switch s {
- case "==":
- fallthrough
- case "":
- fallthrough
- case "=":
- return compEQ
- case ">":
- return compGT
- case ">=":
- return compGE
- case "<":
- return compLT
- case "<=":
- return compLE
- case "!":
- fallthrough
- case "!=":
- return compNE
- }
-
- return nil
-}
-
-// MustParseRange is like ParseRange but panics if the range cannot be parsed.
-func MustParseRange(s string) Range {
- r, err := ParseRange(s)
- if err != nil {
- panic(`semver: ParseRange(` + s + `): ` + err.Error())
- }
- return r
-}
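
The ParseRange doc comment above defines the whole grammar: conditions joined by spaces are ANDed, `||` groups are ORed, and wildcards are rewritten by expandWildcardVersion before parsing. A short sketch exercising both, using only functions from the deleted file (the version strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// AND binds tighter than OR: 1.x versions above 1.0.0, or anything >= 3.0.0.
	r := semver.MustParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	for _, s := range []string{"1.1.1", "2.0.0", "3.0.1"} {
		fmt.Printf("%s matches: %v\n", s, r(semver.MustParse(s))) // true, false, true
	}

	// A bare wildcard expands per the rules above: "1.2.x" => ">=1.2.0 <1.3.0".
	wild := semver.MustParseRange("1.2.x")
	fmt.Println(wild(semver.MustParse("1.2.7"))) // true
	fmt.Println(wild(semver.MustParse("1.3.0"))) // false
}
```
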
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
deleted file mode 100644
index 8ee0842e6..000000000
--- a/vendor/github.com/blang/semver/semver.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package semver
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
-)
-
-const (
- numbers string = "0123456789"
- alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
- alphanum = alphas + numbers
-)
-
-// SpecVersion is the latest fully supported spec version of semver
-var SpecVersion = Version{
- Major: 2,
- Minor: 0,
- Patch: 0,
-}
-
-// Version represents a semver compatible version
-type Version struct {
- Major uint64
- Minor uint64
- Patch uint64
- Pre []PRVersion
-	Build []string // No precedence
-}
-
-// Version to string
-func (v Version) String() string {
- b := make([]byte, 0, 5)
- b = strconv.AppendUint(b, v.Major, 10)
- b = append(b, '.')
- b = strconv.AppendUint(b, v.Minor, 10)
- b = append(b, '.')
- b = strconv.AppendUint(b, v.Patch, 10)
-
- if len(v.Pre) > 0 {
- b = append(b, '-')
- b = append(b, v.Pre[0].String()...)
-
- for _, pre := range v.Pre[1:] {
- b = append(b, '.')
- b = append(b, pre.String()...)
- }
- }
-
- if len(v.Build) > 0 {
- b = append(b, '+')
- b = append(b, v.Build[0]...)
-
- for _, build := range v.Build[1:] {
- b = append(b, '.')
- b = append(b, build...)
- }
- }
-
- return string(b)
-}
-
-// Equals checks if v is equal to o.
-func (v Version) Equals(o Version) bool {
- return (v.Compare(o) == 0)
-}
-
-// EQ checks if v is equal to o.
-func (v Version) EQ(o Version) bool {
- return (v.Compare(o) == 0)
-}
-
-// NE checks if v is not equal to o.
-func (v Version) NE(o Version) bool {
- return (v.Compare(o) != 0)
-}
-
-// GT checks if v is greater than o.
-func (v Version) GT(o Version) bool {
- return (v.Compare(o) == 1)
-}
-
-// GTE checks if v is greater than or equal to o.
-func (v Version) GTE(o Version) bool {
- return (v.Compare(o) >= 0)
-}
-
-// GE checks if v is greater than or equal to o.
-func (v Version) GE(o Version) bool {
- return (v.Compare(o) >= 0)
-}
-
-// LT checks if v is less than o.
-func (v Version) LT(o Version) bool {
- return (v.Compare(o) == -1)
-}
-
-// LTE checks if v is less than or equal to o.
-func (v Version) LTE(o Version) bool {
- return (v.Compare(o) <= 0)
-}
-
-// LE checks if v is less than or equal to o.
-func (v Version) LE(o Version) bool {
- return (v.Compare(o) <= 0)
-}
-
-// Compare compares Versions v to o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v Version) Compare(o Version) int {
- if v.Major != o.Major {
- if v.Major > o.Major {
- return 1
- }
- return -1
- }
- if v.Minor != o.Minor {
- if v.Minor > o.Minor {
- return 1
- }
- return -1
- }
- if v.Patch != o.Patch {
- if v.Patch > o.Patch {
- return 1
- }
- return -1
- }
-
- // Quick comparison if a version has no prerelease versions
- if len(v.Pre) == 0 && len(o.Pre) == 0 {
- return 0
- } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
- return 1
- } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
- return -1
- }
-
- i := 0
- for ; i < len(v.Pre) && i < len(o.Pre); i++ {
- if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
- continue
- } else if comp == 1 {
- return 1
- } else {
- return -1
- }
- }
-
-	// If all compared prerelease versions are equal but one has more of them, that one is greater
- if i == len(v.Pre) && i == len(o.Pre) {
- return 0
- } else if i == len(v.Pre) && i < len(o.Pre) {
- return -1
- } else {
- return 1
- }
-
-}
-
-// Validate validates v and returns an error if it is invalid
-func (v Version) Validate() error {
- // Major, Minor, Patch already validated using uint64
-
- for _, pre := range v.Pre {
- if !pre.IsNum { //Numeric prerelease versions already uint64
- if len(pre.VersionStr) == 0 {
- return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
- }
- if !containsOnly(pre.VersionStr, alphanum) {
- return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
- }
- }
- }
-
- for _, build := range v.Build {
- if len(build) == 0 {
- return fmt.Errorf("Build meta data can not be empty %q", build)
- }
- if !containsOnly(build, alphanum) {
- return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
- }
- }
-
- return nil
-}
-
-// New is an alias for Parse that returns a pointer: it parses a version string and returns a validated Version or an error
-func New(s string) (vp *Version, err error) {
- v, err := Parse(s)
- vp = &v
- return
-}
-
-// Make is an alias for Parse: it parses a version string and returns a validated Version or an error
-func Make(s string) (Version, error) {
- return Parse(s)
-}
-
-// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
-// specs to be parsed by this library. It does so by normalizing versions before passing them to
-// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
-// with only major and minor components specified.
-func ParseTolerant(s string) (Version, error) {
- s = strings.TrimSpace(s)
- s = strings.TrimPrefix(s, "v")
-
- // Split into major.minor.(patch+pr+meta)
- parts := strings.SplitN(s, ".", 3)
- if len(parts) < 3 {
- if strings.ContainsAny(parts[len(parts)-1], "+-") {
- return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
- }
- for len(parts) < 3 {
- parts = append(parts, "0")
- }
- s = strings.Join(parts, ".")
- }
-
- return Parse(s)
-}
-
-// Parse parses version string and returns a validated Version or error
-func Parse(s string) (Version, error) {
- if len(s) == 0 {
- return Version{}, errors.New("Version string empty")
- }
-
- // Split into major.minor.(patch+pr+meta)
- parts := strings.SplitN(s, ".", 3)
- if len(parts) != 3 {
- return Version{}, errors.New("No Major.Minor.Patch elements found")
- }
-
- // Major
- if !containsOnly(parts[0], numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
- }
- if hasLeadingZeroes(parts[0]) {
- return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
- }
- major, err := strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- // Minor
- if !containsOnly(parts[1], numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
- }
- if hasLeadingZeroes(parts[1]) {
- return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
- }
- minor, err := strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- v := Version{}
- v.Major = major
- v.Minor = minor
-
- var build, prerelease []string
- patchStr := parts[2]
-
- if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
- build = strings.Split(patchStr[buildIndex+1:], ".")
- patchStr = patchStr[:buildIndex]
- }
-
- if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
- prerelease = strings.Split(patchStr[preIndex+1:], ".")
- patchStr = patchStr[:preIndex]
- }
-
- if !containsOnly(patchStr, numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
- }
- if hasLeadingZeroes(patchStr) {
- return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
- }
- patch, err := strconv.ParseUint(patchStr, 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- v.Patch = patch
-
- // Prerelease
- for _, prstr := range prerelease {
- parsedPR, err := NewPRVersion(prstr)
- if err != nil {
- return Version{}, err
- }
- v.Pre = append(v.Pre, parsedPR)
- }
-
- // Build meta data
- for _, str := range build {
- if len(str) == 0 {
- return Version{}, errors.New("Build meta data is empty")
- }
- if !containsOnly(str, alphanum) {
- return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
- }
- v.Build = append(v.Build, str)
- }
-
- return v, nil
-}
-
-// MustParse is like Parse but panics if the version cannot be parsed.
-func MustParse(s string) Version {
- v, err := Parse(s)
- if err != nil {
- panic(`semver: Parse(` + s + `): ` + err.Error())
- }
- return v
-}
-
-// PRVersion represents a PreRelease Version
-type PRVersion struct {
- VersionStr string
- VersionNum uint64
- IsNum bool
-}
-
-// NewPRVersion creates a new valid prerelease version
-func NewPRVersion(s string) (PRVersion, error) {
- if len(s) == 0 {
- return PRVersion{}, errors.New("Prerelease is empty")
- }
- v := PRVersion{}
- if containsOnly(s, numbers) {
- if hasLeadingZeroes(s) {
- return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
- }
- num, err := strconv.ParseUint(s, 10, 64)
-
- // Might never be hit, but just in case
- if err != nil {
- return PRVersion{}, err
- }
- v.VersionNum = num
- v.IsNum = true
- } else if containsOnly(s, alphanum) {
- v.VersionStr = s
- v.IsNum = false
- } else {
- return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
- }
- return v, nil
-}
-
-// IsNumeric checks if the prerelease version is numeric
-func (v PRVersion) IsNumeric() bool {
- return v.IsNum
-}
-
-// Compare compares two PreRelease Versions v and o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v PRVersion) Compare(o PRVersion) int {
- if v.IsNum && !o.IsNum {
- return -1
- } else if !v.IsNum && o.IsNum {
- return 1
- } else if v.IsNum && o.IsNum {
- if v.VersionNum == o.VersionNum {
- return 0
- } else if v.VersionNum > o.VersionNum {
- return 1
- } else {
- return -1
- }
- } else { // both are Alphas
- if v.VersionStr == o.VersionStr {
- return 0
- } else if v.VersionStr > o.VersionStr {
- return 1
- } else {
- return -1
- }
- }
-}
-
-// PreRelease version to string
-func (v PRVersion) String() string {
- if v.IsNum {
- return strconv.FormatUint(v.VersionNum, 10)
- }
- return v.VersionStr
-}
-
-func containsOnly(s string, set string) bool {
- return strings.IndexFunc(s, func(r rune) bool {
- return !strings.ContainsRune(set, r)
- }) == -1
-}
-
-func hasLeadingZeroes(s string) bool {
- return len(s) > 1 && s[0] == '0'
-}
-
-// NewBuildVersion creates a new valid build version
-func NewBuildVersion(s string) (string, error) {
- if len(s) == 0 {
- return "", errors.New("Buildversion is empty")
- }
- if !containsOnly(s, alphanum) {
- return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
- }
- return s, nil
-}
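
ParseTolerant, deleted above, exists because callers often hold loosely formatted versions; it normalizes before delegating to the strict Parse. A sketch of the difference:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Strict Parse requires all of major.minor.patch and no "v" prefix.
	if _, err := semver.Parse("v1.2"); err != nil {
		fmt.Println("Parse rejects it:", err)
	}

	// ParseTolerant trims spaces, strips the "v", and pads the patch level.
	v, err := semver.ParseTolerant(" v1.2 ")
	if err != nil {
		panic(err)
	}
	fmt.Println("ParseTolerant accepts it as", v) // 1.2.0
}
```
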
diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go
deleted file mode 100644
index e18f88082..000000000
--- a/vendor/github.com/blang/semver/sort.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package semver
-
-import (
- "sort"
-)
-
-// Versions represents multiple versions.
-type Versions []Version
-
-// Len returns length of version collection
-func (s Versions) Len() int {
- return len(s)
-}
-
-// Swap swaps two versions inside the collection by their indices
-func (s Versions) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Less checks if version at index i is less than version at index j
-func (s Versions) Less(i, j int) bool {
- return s[i].LT(s[j])
-}
-
-// Sort sorts a slice of versions
-func Sort(versions []Version) {
- sort.Sort(Versions(versions))
-}
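
The sort.Interface implementation above orders by semver precedence rather than lexically, so prereleases sort before their releases. A sketch:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	versions := []semver.Version{
		semver.MustParse("1.1.0"),
		semver.MustParse("1.0.0-beta"),
		semver.MustParse("1.0.0"),
	}
	// Sort orders ascending, so the prerelease of 1.0.0 lands before 1.0.0 itself.
	semver.Sort(versions)
	fmt.Println(versions) // [1.0.0-beta 1.0.0 1.1.0]
}
```
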
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go
deleted file mode 100644
index eb4d80266..000000000
--- a/vendor/github.com/blang/semver/sql.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package semver
-
-import (
- "database/sql/driver"
- "fmt"
-)
-
-// Scan implements the database/sql.Scanner interface.
-func (v *Version) Scan(src interface{}) (err error) {
- var str string
- switch src := src.(type) {
- case string:
- str = src
- case []byte:
- str = string(src)
- default:
- return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
- }
-
-	t, err := Parse(str)
-	if err == nil {
-		*v = t
-	}
-
- return
-}
-
-// Value implements the database/sql/driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
- return v.String(), nil
-}
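
Scan/Value above let a Version be stored in a text column and read back without manual conversion. A sketch of the contract alone, with no live database (the Scan input stands in for what a driver would hand back):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Value renders the version as its canonical string for the driver.
	v := semver.MustParse("1.2.3-beta.1")
	val, err := v.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("driver value: %v\n", val) // 1.2.3-beta.1

	// Scan accepts a string or []byte, as a database row would supply.
	var out semver.Version
	if err := out.Scan([]byte("2.0.0")); err != nil {
		panic(err)
	}
	fmt.Println("scanned:", out) // 2.0.0
}
```
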
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
deleted file mode 100644
index c516ea88d..000000000
--- a/vendor/github.com/cespare/xxhash/v2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
- - "1.x"
- - master
-env:
- - TAGS=""
- - TAGS="-tags purego"
-script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
deleted file mode 100644
index 24b53065f..000000000
--- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
deleted file mode 100644
index 2fd8693c2..000000000
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# xxhash
-
-[GoDoc](https://godoc.org/github.com/cespare/xxhash)
-[Build Status](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
- func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [FreeCache](https://github.com/coocood/freecache)
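
The README above lists the one-shot and streaming entry points. A minimal sketch showing that both paths produce the same digest (the input string is illustrative):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of bytes or, via the zero-copy helper, a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello")) // same digest

	// Streaming through Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hel")
	d.WriteString("lo")
	fmt.Printf("%016x\n", d.Sum64()) // matches the one-shot results
}
```
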
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
deleted file mode 100644
index 49f67608b..000000000
--- a/vendor/github.com/cespare/xxhash/v2/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/cespare/xxhash/v2
-
-go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
deleted file mode 100644
index db0b35fbe..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "errors"
- "math/bits"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total uint64
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
- var d Digest
- d.Reset()
- return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -prime1v
- d.total = 0
- d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
- n = len(b)
- d.total += uint64(n)
-
- if d.n+n < 32 {
- // This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
- d.n += n
- return
- }
-
- if d.n > 0 {
- // Finish off the partial block.
- copy(d.mem[d.n:], b)
- d.v1 = round(d.v1, u64(d.mem[0:8]))
- d.v2 = round(d.v2, u64(d.mem[8:16]))
- d.v3 = round(d.v3, u64(d.mem[16:24]))
- d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
- d.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- nw := writeBlocks(d, b)
- b = b[nw:]
- }
-
- // Store any remaining partial block.
- copy(d.mem[:], b)
- d.n = len(b)
-
- return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
- s := d.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
- var h uint64
-
- if d.total >= 32 {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = d.v3 + prime5
- }
-
- h += d.total
-
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-const (
- magic = "xxh\x06"
- marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- b = appendUint64(b, d.v1)
- b = appendUint64(b, d.v2)
- b = appendUint64(b, d.v3)
- b = appendUint64(b, d.v4)
- b = appendUint64(b, d.total)
- b = append(b, d.mem[:d.n]...)
- b = b[:len(b)+len(d.mem)-d.n]
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic):]
- b, d.v1 = consumeUint64(b)
- b, d.v2 = consumeUint64(b)
- b, d.v3 = consumeUint64(b)
- b, d.v4 = consumeUint64(b)
- b, d.total = consumeUint64(b)
- copy(d.mem[:], b)
- b = b[len(d.mem):]
- d.n = int(d.total % uint64(len(d.mem)))
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.LittleEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := u64(b)
- return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
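
MarshalBinary/UnmarshalBinary above serialize the full internal state (v1..v4, total, and the partial block), so a hash can be checkpointed mid-stream and resumed later. A sketch:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.Write([]byte("partial inp"))

	// Checkpoint the digest mid-stream...
	state, err := d.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// ...then restore it into a fresh Digest and keep writing.
	resumed := xxhash.New()
	if err := resumed.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	resumed.Write([]byte("ut"))

	// The resumed digest matches hashing the whole input at once.
	fmt.Println(resumed.Sum64() == xxhash.Sum64([]byte("partial input"))) // true
}
```
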
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
deleted file mode 100644
index ad14b807f..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
deleted file mode 100644
index d580e32ae..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// CX pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// R15 prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
-
- // Load slice.
- MOVQ b_base+0(FP), CX
- MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until CX > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
- ADDQ $24, BX
-
- CMPQ CX, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ R15, AX
-
- CMPQ CX, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ CX, BX
- JG singles
-
- MOVL (CX), R8
- ADDQ $4, CX
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ CX, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ CX, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is CX minus the old base pointer.
- SUBQ b_base+8(FP), CX
- MOVQ CX, ret+32(FP)
-
- RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
deleted file mode 100644
index 4a5a82160..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // d := New()
- // d.Write(b)
- // return d.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- n := len(b)
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
- return n - len(b)
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
deleted file mode 100644
index fc9bea7a3..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
- return d.Write([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
deleted file mode 100644
index 53bf76efb..000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
-//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return d.Write(b)
-}
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
index fab974b7f..4b0a5ff58 100644
--- a/vendor/github.com/chzyer/readline/README.md
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -11,7 +11,7 @@
-A powerful readline library in `Linux` `macOS` `Windows` `Solaris`
+A powerful readline library in `Linux` `macOS` `Windows` `Solaris` `AIX`
## Guide
diff --git a/vendor/github.com/chzyer/readline/go.mod b/vendor/github.com/chzyer/readline/go.mod
new file mode 100644
index 000000000..66180f6b1
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/go.mod
@@ -0,0 +1,10 @@
+module github.com/chzyer/readline
+
+go 1.15
+
+require (
+ github.com/chzyer/test v1.0.0
+ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5
+)
+
+require github.com/chzyer/logex v1.2.1
diff --git a/vendor/github.com/chzyer/readline/go.sum b/vendor/github.com/chzyer/readline/go.sum
new file mode 100644
index 000000000..2358df088
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/go.sum
@@ -0,0 +1,6 @@
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go
index 4c31624f8..b60939a91 100644
--- a/vendor/github.com/chzyer/readline/operation.go
+++ b/vendor/github.com/chzyer/readline/operation.go
@@ -109,10 +109,12 @@ func (o *Operation) ioloop() {
keepInSearchMode := false
keepInCompleteMode := false
r := o.t.ReadRune()
+
if o.GetConfig().FuncFilterInputRune != nil {
var process bool
r, process = o.GetConfig().FuncFilterInputRune(r)
if !process {
+ o.t.KickRead()
o.buf.Refresh(nil) // to refresh the line
continue // ignore this rune
}
@@ -434,6 +436,10 @@ func (o *Operation) Slice() ([]byte, error) {
}
func (o *Operation) Close() {
+ select {
+ case o.errchan <- io.EOF:
+ default:
+ }
o.history.Close()
}
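
The patched Close above pushes io.EOF into o.errchan through a select with a default branch: a goroutine blocked reading the channel is woken with EOF, but if nobody is receiving, the default case keeps Close from hanging. A minimal standalone sketch of that non-blocking-send pattern (channel and names are local to the sketch):

package main

import (
	"fmt"
	"io"
)

func main() {
	errs := make(chan error) // unbuffered, like Operation.errchan

	// Non-blocking send: delivers io.EOF only if a receiver is already
	// waiting; otherwise the default branch runs and we return immediately.
	select {
	case errs <- io.EOF:
		fmt.Println("delivered EOF to a pending reader")
	default:
		fmt.Println("no reader pending; skipped without blocking")
	}
}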
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
index 0e7aca06d..63b917101 100644
--- a/vendor/github.com/chzyer/readline/readline.go
+++ b/vendor/github.com/chzyer/readline/readline.go
@@ -17,7 +17,9 @@
//
package readline
-import "io"
+import (
+ "io"
+)
type Instance struct {
Config *Config
@@ -270,14 +272,24 @@ func (i *Instance) ReadSlice() ([]byte, error) {
}
// we must make sure that call Close() before process exit.
+// if there is a pending read operation, that read will be interrupted,
+// so you can capture the exit signal and call Instance.Close(); it's thread-safe.
func (i *Instance) Close() error {
+ i.Config.Stdin.Close()
+ i.Operation.Close()
if err := i.Terminal.Close(); err != nil {
return err
}
- i.Config.Stdin.Close()
- i.Operation.Close()
return nil
}
+
+// call CaptureExitSignal when you want readline to exit gracefully.
+func (i *Instance) CaptureExitSignal() {
+ CaptureExitSignal(func() {
+ i.Close()
+ })
+}
+
func (i *Instance) Clean() {
i.Operation.Clean()
}
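
Close now shuts down Stdin and the Operation before the Terminal, so a pending read is interrupted rather than left blocking, and the new CaptureExitSignal method ties that shutdown to SIGINT/SIGTERM. A hedged usage sketch against the readline API as patched here:

package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rl, err := readline.New("> ")
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	// Close the instance (interrupting any pending read) on SIGINT/SIGTERM.
	rl.CaptureExitSignal()

	for {
		line, err := rl.Readline() // returns an error once Close runs
		if err != nil {
			return
		}
		fmt.Println("read:", line)
	}
}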
diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go
index 81d2da50c..d95df1e36 100644
--- a/vendor/github.com/chzyer/readline/runebuf.go
+++ b/vendor/github.com/chzyer/readline/runebuf.go
@@ -35,7 +35,7 @@ type RuneBuffer struct {
sync.Mutex
}
-func (r* RuneBuffer) pushKill(text []rune) {
+func (r *RuneBuffer) pushKill(text []rune) {
r.lastKill = append([]rune{}, text...)
}
@@ -221,7 +221,7 @@ func (r *RuneBuffer) DeleteWord() {
}
for i := init + 1; i < len(r.buf); i++ {
if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.pushKill(r.buf[r.idx:i-1])
+ r.pushKill(r.buf[r.idx : i-1])
r.Refresh(func() {
r.buf = append(r.buf[:r.idx], r.buf[i-1:]...)
})
@@ -350,7 +350,7 @@ func (r *RuneBuffer) Yank() {
return
}
r.Refresh(func() {
- buf := make([]rune, 0, len(r.buf) + len(r.lastKill))
+ buf := make([]rune, 0, len(r.buf)+len(r.lastKill))
buf = append(buf, r.buf[:r.idx]...)
buf = append(buf, r.lastKill...)
buf = append(buf, r.buf[r.idx:]...)
diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go
index 133993ca8..ea5db9346 100644
--- a/vendor/github.com/chzyer/readline/term.go
+++ b/vendor/github.com/chzyer/readline/term.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
diff --git a/vendor/github.com/chzyer/readline/term_nosyscall6.go b/vendor/github.com/chzyer/readline/term_nosyscall6.go
new file mode 100644
index 000000000..df9233937
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_nosyscall6.go
@@ -0,0 +1,32 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix os400 solaris
+
+package readline
+
+import "golang.org/x/sys/unix"
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (int, int, error) {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(ws.Col), int(ws.Row), nil
+}
+
+type Termios unix.Termios
+
+func getTermios(fd int) (*Termios, error) {
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+ return (*Termios)(termios), nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios))
+}
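
The new file replaces the Solaris-only termios code (deleted below) with a golang.org/x/sys/unix implementation shared by AIX, OS/400, and Solaris. The getTermios/setTermios helpers are unexported, but the same TCGETS read / TCSETSF write round-trip can be sketched directly against x/sys/unix, here to disable terminal echo (the build tag and function names are the sketch's own):

//go:build aix || linux || solaris

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// disableEcho reads the current termios for fd, clears the ECHO flag, and
// writes it back with TCSETSF (flushing pending input first). It returns a
// function that restores the saved state.
func disableEcho(fd int) (restore func() error, err error) {
	old, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		return nil, err
	}
	newState := *old
	newState.Lflag &^= unix.ECHO
	if err := unix.IoctlSetTermios(fd, unix.TCSETSF, &newState); err != nil {
		return nil, err
	}
	return func() error {
		return unix.IoctlSetTermios(fd, unix.TCSETSF, old)
	}, nil
}

func main() {
	restore, err := disableEcho(int(os.Stdin.Fd()))
	if err != nil {
		panic(err)
	}
	defer restore()
	// ... read a password or otherwise run in no-echo mode here ...
}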
diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go
deleted file mode 100644
index 4c27273c7..000000000
--- a/vendor/github.com/chzyer/readline/term_solaris.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-package readline
-
-import "golang.org/x/sys/unix"
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (int, int, error) {
- ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
- if err != nil {
- return 0, 0, err
- }
- return int(ws.Col), int(ws.Row), nil
-}
-
-type Termios unix.Termios
-
-func getTermios(fd int) (*Termios, error) {
- termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
- if err != nil {
- return nil, err
- }
- return (*Termios)(termios), nil
-}
-
-func setTermios(fd int, termios *Termios) error {
- return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios))
-}
diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go
index 1078631c1..38413d0cf 100644
--- a/vendor/github.com/chzyer/readline/terminal.go
+++ b/vendor/github.com/chzyer/readline/terminal.go
@@ -125,6 +125,7 @@ func (t *Terminal) ioloop() {
var (
isEscape bool
isEscapeEx bool
+ isEscapeSS3 bool
expectNextChar bool
)
@@ -152,9 +153,15 @@ func (t *Terminal) ioloop() {
if isEscape {
isEscape = false
if r == CharEscapeEx {
+ // ^[[
expectNextChar = true
isEscapeEx = true
continue
+ } else if r == CharO {
+ // ^[O
+ expectNextChar = true
+ isEscapeSS3 = true
+ continue
}
r = escapeKey(r, buf)
} else if isEscapeEx {
@@ -177,6 +184,15 @@ func (t *Terminal) ioloop() {
expectNextChar = true
continue
}
+ } else if isEscapeSS3 {
+ isEscapeSS3 = false
+ if key := readEscKey(r, buf); key != nil {
+ r = escapeSS3Key(key)
+ }
+ if r == 0 {
+ expectNextChar = true
+ continue
+ }
}
expectNextChar = true
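
This hunk adds recognition of SS3 sequences, ESC O <final> (bytes 0x1b 0x4f ...), which terminals emit for cursor keys in application mode instead of the CSI form ESC [ <final>. The mapping of the final byte lives in the escapeSS3Key helper added to utils.go below; a self-contained illustration of the same decoding (names are local to the sketch):

package main

import "fmt"

// decodeSS3 maps the final byte of an SS3 sequence (ESC O x) to a key name.
// In application cursor-key mode, an up-arrow press arrives as the three
// bytes 0x1b 'O' 'A' rather than the CSI form 0x1b '[' 'A'.
func decodeSS3(final byte) string {
	switch final {
	case 'A':
		return "up"
	case 'B':
		return "down"
	case 'C':
		return "right"
	case 'D':
		return "left"
	case 'H':
		return "home"
	case 'F':
		return "end"
	default:
		return "unknown"
	}
}

func main() {
	seq := []byte{0x1b, 'O', 'A'} // what an up-arrow press looks like
	if len(seq) == 3 && seq[0] == 0x1b && seq[1] == 'O' {
		fmt.Println("SS3 key:", decodeSS3(seq[2]))
	}
}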
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
index af4e00521..0706dd4ec 100644
--- a/vendor/github.com/chzyer/readline/utils.go
+++ b/vendor/github.com/chzyer/readline/utils.go
@@ -6,9 +6,11 @@ import (
"container/list"
"fmt"
"os"
+ "os/signal"
"strconv"
"strings"
"sync"
+ "syscall"
"time"
"unicode"
)
@@ -41,6 +43,7 @@ const (
CharCtrlY = 25
CharCtrlZ = 26
CharEsc = 27
+ CharO = 79
CharEscapeEx = 91
CharBackspace = 127
)
@@ -121,6 +124,27 @@ func escapeExKey(key *escapeKeyPair) rune {
return r
}
+// translate SS3 codes (ESC O X) for up/down/home/end/etc.
+func escapeSS3Key(key *escapeKeyPair) rune {
+ var r rune
+ switch key.typ {
+ case 'D':
+ r = CharBackward
+ case 'C':
+ r = CharForward
+ case 'A':
+ r = CharPrev
+ case 'B':
+ r = CharNext
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ default:
+ }
+ return r
+}
+
type escapeKeyPair struct {
attr string
typ rune
@@ -275,3 +299,13 @@ func Debug(o ...interface{}) {
fmt.Fprintln(f, o...)
f.Close()
}
+
+func CaptureExitSignal(f func()) {
+ cSignal := make(chan os.Signal, 1)
+ signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ for range cSignal {
+ f()
+ }
+ }()
+}
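
CaptureExitSignal above is the generic building block: it subscribes to SIGINT/SIGTERM with signal.Notify and invokes the callback once per delivered signal from a background goroutine, for as long as the channel stays open. A hedged standalone variant of the same loop, with an added stop function (the signal.Stop/close cleanup is this sketch's addition, not part of the vendored helper):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// captureExitSignal runs f on every SIGINT/SIGTERM until stop is called.
func captureExitSignal(f func()) (stop func()) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		for range c {
			f()
		}
	}()
	return func() {
		signal.Stop(c)
		close(c) // ends the range loop above
	}
}

func main() {
	done := make(chan struct{}, 1)
	stop := captureExitSignal(func() {
		select {
		case done <- struct{}{}: // wake main; non-blocking on repeats
		default:
		}
	})
	defer stop()
	<-done // block until Ctrl-C / SIGTERM arrives
	fmt.Println("shutting down")
}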
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
index f88dac97b..fc4949232 100644
--- a/vendor/github.com/chzyer/readline/utils_unix.go
+++ b/vendor/github.com/chzyer/readline/utils_unix.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris
package readline
diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/stats/v1/doc.go
deleted file mode 100644
index 23f3cdd4b..000000000
--- a/vendor/github.com/containerd/cgroups/stats/v1/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package v1
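
The deletion that follows drops the gogo/protobuf-generated cgroup v1 metrics code from the vendor tree; containers/common no longer needs the package. Its MarshalToSizedBuffer functions fill the buffer back to front, framing each field as a protobuf tag byte plus a base-128 varint written by encodeVarintMetrics. A hedged sketch of that varint framing (standalone, not the generated code itself):

package main

import "fmt"

// putUvarint appends v to buf as a protobuf base-128 varint: seven payload
// bits per byte, least-significant group first, with the high bit set on
// every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// Field 1, wire type 0 (varint): tag byte 0x08, as the generated code
	// emits for HugetlbStat.Usage.
	msg := putUvarint([]byte{0x08}, 300)
	fmt.Printf("% x\n", msg) // 08 ac 02
}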
diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
deleted file mode 100644
index 6d2d41770..000000000
--- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
+++ /dev/null
@@ -1,6125 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: github.com/containerd/cgroups/stats/v1/metrics.proto
-
-package v1
-
-import (
- fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
- reflect "reflect"
- strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type Metrics struct {
- Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"`
- Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"`
- CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"`
- Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"`
- Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"`
- Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"`
- Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"`
- CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"`
- MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metrics) Reset() { *m = Metrics{} }
-func (*Metrics) ProtoMessage() {}
-func (*Metrics) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{0}
-}
-func (m *Metrics) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Metrics.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Metrics) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metrics.Merge(m, src)
-}
-func (m *Metrics) XXX_Size() int {
- return m.Size()
-}
-func (m *Metrics) XXX_DiscardUnknown() {
- xxx_messageInfo_Metrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metrics proto.InternalMessageInfo
-
-type HugetlbStat struct {
- Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"`
- Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
- Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
- Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HugetlbStat) Reset() { *m = HugetlbStat{} }
-func (*HugetlbStat) ProtoMessage() {}
-func (*HugetlbStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{1}
-}
-func (m *HugetlbStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HugetlbStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HugetlbStat.Merge(m, src)
-}
-func (m *HugetlbStat) XXX_Size() int {
- return m.Size()
-}
-func (m *HugetlbStat) XXX_DiscardUnknown() {
- xxx_messageInfo_HugetlbStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo
-
-type PidsStat struct {
- Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"`
- Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PidsStat) Reset() { *m = PidsStat{} }
-func (*PidsStat) ProtoMessage() {}
-func (*PidsStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{2}
-}
-func (m *PidsStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PidsStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PidsStat.Merge(m, src)
-}
-func (m *PidsStat) XXX_Size() int {
- return m.Size()
-}
-func (m *PidsStat) XXX_DiscardUnknown() {
- xxx_messageInfo_PidsStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PidsStat proto.InternalMessageInfo
-
-type CPUStat struct {
- Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"`
- Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CPUStat) Reset() { *m = CPUStat{} }
-func (*CPUStat) ProtoMessage() {}
-func (*CPUStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{3}
-}
-func (m *CPUStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CPUStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CPUStat.Merge(m, src)
-}
-func (m *CPUStat) XXX_Size() int {
- return m.Size()
-}
-func (m *CPUStat) XXX_DiscardUnknown() {
- xxx_messageInfo_CPUStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CPUStat proto.InternalMessageInfo
-
-type CPUUsage struct {
- // values in nanoseconds
- Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
- Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"`
- User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"`
- PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CPUUsage) Reset() { *m = CPUUsage{} }
-func (*CPUUsage) ProtoMessage() {}
-func (*CPUUsage) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{4}
-}
-func (m *CPUUsage) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CPUUsage) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CPUUsage.Merge(m, src)
-}
-func (m *CPUUsage) XXX_Size() int {
- return m.Size()
-}
-func (m *CPUUsage) XXX_DiscardUnknown() {
- xxx_messageInfo_CPUUsage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CPUUsage proto.InternalMessageInfo
-
-type Throttle struct {
- Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"`
- ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"`
- ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Throttle) Reset() { *m = Throttle{} }
-func (*Throttle) ProtoMessage() {}
-func (*Throttle) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{5}
-}
-func (m *Throttle) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Throttle.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Throttle) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Throttle.Merge(m, src)
-}
-func (m *Throttle) XXX_Size() int {
- return m.Size()
-}
-func (m *Throttle) XXX_DiscardUnknown() {
- xxx_messageInfo_Throttle.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Throttle proto.InternalMessageInfo
-
-type MemoryStat struct {
- Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"`
- RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"`
- RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"`
- MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"`
- Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"`
- Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"`
- PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"`
- PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"`
- PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"`
- PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"`
- InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
- ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
- InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
- ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
- Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
- HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"`
- HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"`
- TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"`
- TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"`
- TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"`
- TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"`
- TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"`
- TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"`
- TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"`
- TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"`
- TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"`
- TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"`
- TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"`
- TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"`
- TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"`
- TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"`
- TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"`
- Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"`
- Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"`
- Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"`
- KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemoryStat) Reset() { *m = MemoryStat{} }
-func (*MemoryStat) ProtoMessage() {}
-func (*MemoryStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{6}
-}
-func (m *MemoryStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemoryStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemoryStat.Merge(m, src)
-}
-func (m *MemoryStat) XXX_Size() int {
- return m.Size()
-}
-func (m *MemoryStat) XXX_DiscardUnknown() {
- xxx_messageInfo_MemoryStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemoryStat proto.InternalMessageInfo
-
-type MemoryEntry struct {
- Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
- Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"`
- Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"`
- Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemoryEntry) Reset() { *m = MemoryEntry{} }
-func (*MemoryEntry) ProtoMessage() {}
-func (*MemoryEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{7}
-}
-func (m *MemoryEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemoryEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemoryEntry.Merge(m, src)
-}
-func (m *MemoryEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *MemoryEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_MemoryEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo
-
-type MemoryOomControl struct {
- OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"`
- UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"`
- OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} }
-func (*MemoryOomControl) ProtoMessage() {}
-func (*MemoryOomControl) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{8}
-}
-func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemoryOomControl) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemoryOomControl.Merge(m, src)
-}
-func (m *MemoryOomControl) XXX_Size() int {
- return m.Size()
-}
-func (m *MemoryOomControl) XXX_DiscardUnknown() {
- xxx_messageInfo_MemoryOomControl.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo
-
-type BlkIOStat struct {
- IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"`
- IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"`
- IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"`
- IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"`
- IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"`
- IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"`
- IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"`
- SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BlkIOStat) Reset() { *m = BlkIOStat{} }
-func (*BlkIOStat) ProtoMessage() {}
-func (*BlkIOStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{9}
-}
-func (m *BlkIOStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *BlkIOStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BlkIOStat.Merge(m, src)
-}
-func (m *BlkIOStat) XXX_Size() int {
- return m.Size()
-}
-func (m *BlkIOStat) XXX_DiscardUnknown() {
- xxx_messageInfo_BlkIOStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo
-
-type BlkIOEntry struct {
- Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"`
- Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"`
- Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"`
- Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"`
- Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} }
-func (*BlkIOEntry) ProtoMessage() {}
-func (*BlkIOEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{10}
-}
-func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *BlkIOEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BlkIOEntry.Merge(m, src)
-}
-func (m *BlkIOEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *BlkIOEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_BlkIOEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo
-
-type RdmaStat struct {
- Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"`
- Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RdmaStat) Reset() { *m = RdmaStat{} }
-func (*RdmaStat) ProtoMessage() {}
-func (*RdmaStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{11}
-}
-func (m *RdmaStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RdmaStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RdmaStat.Merge(m, src)
-}
-func (m *RdmaStat) XXX_Size() int {
- return m.Size()
-}
-func (m *RdmaStat) XXX_DiscardUnknown() {
- xxx_messageInfo_RdmaStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RdmaStat proto.InternalMessageInfo
-
-type RdmaEntry struct {
- Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
- HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"`
- HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RdmaEntry) Reset() { *m = RdmaEntry{} }
-func (*RdmaEntry) ProtoMessage() {}
-func (*RdmaEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{12}
-}
-func (m *RdmaEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RdmaEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RdmaEntry.Merge(m, src)
-}
-func (m *RdmaEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *RdmaEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_RdmaEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo
-
-type NetworkStat struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
- RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
- RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
- RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
- TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
- TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
- TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
- TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *NetworkStat) Reset() { *m = NetworkStat{} }
-func (*NetworkStat) ProtoMessage() {}
-func (*NetworkStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{13}
-}
-func (m *NetworkStat) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *NetworkStat) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkStat.Merge(m, src)
-}
-func (m *NetworkStat) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkStat) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkStat.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkStat proto.InternalMessageInfo
-
-// CgroupStats exports per-cgroup statistics.
-type CgroupStats struct {
- // number of tasks sleeping
- NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"`
- // number of tasks running
- NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"`
- // number of tasks in stopped state
- NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"`
- // number of tasks in uninterruptible state
- NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"`
- // number of tasks waiting on IO
- NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CgroupStats) Reset() { *m = CgroupStats{} }
-func (*CgroupStats) ProtoMessage() {}
-func (*CgroupStats) Descriptor() ([]byte, []int) {
- return fileDescriptor_a17b2d87c332bfaa, []int{14}
-}
-func (m *CgroupStats) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CgroupStats) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CgroupStats.Merge(m, src)
-}
-func (m *CgroupStats) XXX_Size() int {
- return m.Size()
-}
-func (m *CgroupStats) XXX_DiscardUnknown() {
- xxx_messageInfo_CgroupStats.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CgroupStats proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics")
- proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat")
- proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat")
- proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat")
- proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage")
- proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle")
- proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat")
- proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry")
- proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl")
- proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat")
- proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry")
- proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat")
- proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry")
- proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat")
- proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats")
-}
-
-func init() {
- proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa)
-}
-
-var fileDescriptor_a17b2d87c332bfaa = []byte{
- // 1749 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6,
- 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb,
- 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb,
- 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03,
- 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45,
- 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f,
- 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb,
- 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4,
- 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71,
- 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83,
- 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce,
- 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde,
- 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8,
- 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c,
- 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0,
- 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17,
- 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61,
- 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7,
- 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f,
- 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07,
- 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0,
- 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07,
- 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c,
- 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16,
- 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f,
- 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83,
- 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e,
- 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85,
- 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91,
- 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00,
- 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5,
- 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0,
- 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6,
- 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6,
- 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed,
- 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e,
- 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05,
- 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a,
- 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15,
- 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60,
- 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd,
- 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74,
- 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04,
- 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76,
- 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b,
- 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0,
- 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d,
- 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85,
- 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17,
- 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3,
- 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf,
- 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8,
- 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb,
- 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72,
- 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43,
- 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a,
- 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8,
- 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d,
- 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee,
- 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe,
- 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5,
- 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81,
- 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7,
- 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e,
- 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d,
- 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6,
- 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1,
- 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4,
- 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79,
- 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79,
- 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d,
- 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26,
- 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76,
- 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86,
- 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44,
- 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e,
- 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a,
- 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c,
- 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d,
- 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f,
- 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7,
- 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35,
- 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c,
- 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15,
- 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5,
- 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2,
- 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15,
- 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71,
- 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1,
- 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5,
- 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8,
- 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe,
- 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5,
- 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99,
- 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f,
- 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c,
- 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16,
- 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26,
- 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87,
- 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65,
- 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26,
- 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9,
- 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52,
- 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f,
- 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61,
- 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac,
- 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5,
- 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e,
- 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24,
- 0x22, 0xc4, 0x11, 0x00, 0x00,
-}
-
-func (m *Metrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.MemoryOomControl != nil {
- {
- size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- if m.CgroupStats != nil {
- {
- size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- if len(m.Network) > 0 {
- for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.Rdma != nil {
- {
- size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.Blkio != nil {
- {
- size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.Memory != nil {
- {
- size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.CPU != nil {
- {
- size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Pids != nil {
- {
- size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if len(m.Hugetlb) > 0 {
- for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HugetlbStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Pagesize) > 0 {
- i -= len(m.Pagesize)
- copy(dAtA[i:], m.Pagesize)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize)))
- i--
- dAtA[i] = 0x22
- }
- if m.Failcnt != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
- i--
- dAtA[i] = 0x18
- }
- if m.Max != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
- i--
- dAtA[i] = 0x10
- }
- if m.Usage != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *PidsStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Limit != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
- i--
- dAtA[i] = 0x10
- }
- if m.Current != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Current))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CPUStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Throttling != nil {
- {
- size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Usage != nil {
- {
- size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CPUUsage) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PerCPU) > 0 {
- dAtA11 := make([]byte, len(m.PerCPU)*10)
- var j10 int
- for _, num := range m.PerCPU {
- for num >= 1<<7 {
- dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j10++
- }
- dAtA11[j10] = uint8(num)
- j10++
- }
- i -= j10
- copy(dAtA[i:], dAtA11[:j10])
- i = encodeVarintMetrics(dAtA, i, uint64(j10))
- i--
- dAtA[i] = 0x22
- }
- if m.User != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.User))
- i--
- dAtA[i] = 0x18
- }
- if m.Kernel != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel))
- i--
- dAtA[i] = 0x10
- }
- if m.Total != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Total))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Throttle) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Throttle) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ThrottledTime != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime))
- i--
- dAtA[i] = 0x18
- }
- if m.ThrottledPeriods != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods))
- i--
- dAtA[i] = 0x10
- }
- if m.Periods != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Periods))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemoryStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.KernelTCP != nil {
- {
- size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xa2
- }
- if m.Kernel != nil {
- {
- size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x9a
- }
- if m.Swap != nil {
- {
- size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x92
- }
- if m.Usage != nil {
- {
- size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x8a
- }
- if m.TotalUnevictable != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x80
- }
- if m.TotalActiveFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xf8
- }
- if m.TotalInactiveFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xf0
- }
- if m.TotalActiveAnon != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xe8
- }
- if m.TotalInactiveAnon != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xe0
- }
- if m.TotalPgMajFault != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xd8
- }
- if m.TotalPgFault != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xd0
- }
- if m.TotalPgPgOut != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xc8
- }
- if m.TotalPgPgIn != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xc0
- }
- if m.TotalWriteback != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb8
- }
- if m.TotalDirty != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb0
- }
- if m.TotalMappedFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xa8
- }
- if m.TotalRSSHuge != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xa0
- }
- if m.TotalRSS != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x98
- }
- if m.TotalCache != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x90
- }
- if m.HierarchicalSwapLimit != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x88
- }
- if m.HierarchicalMemoryLimit != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x80
- }
- if m.Unevictable != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable))
- i--
- dAtA[i] = 0x78
- }
- if m.ActiveFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile))
- i--
- dAtA[i] = 0x70
- }
- if m.InactiveFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile))
- i--
- dAtA[i] = 0x68
- }
- if m.ActiveAnon != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon))
- i--
- dAtA[i] = 0x60
- }
- if m.InactiveAnon != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon))
- i--
- dAtA[i] = 0x58
- }
- if m.PgMajFault != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault))
- i--
- dAtA[i] = 0x50
- }
- if m.PgFault != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault))
- i--
- dAtA[i] = 0x48
- }
- if m.PgPgOut != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut))
- i--
- dAtA[i] = 0x40
- }
- if m.PgPgIn != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn))
- i--
- dAtA[i] = 0x38
- }
- if m.Writeback != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback))
- i--
- dAtA[i] = 0x30
- }
- if m.Dirty != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty))
- i--
- dAtA[i] = 0x28
- }
- if m.MappedFile != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile))
- i--
- dAtA[i] = 0x20
- }
- if m.RSSHuge != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge))
- i--
- dAtA[i] = 0x18
- }
- if m.RSS != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RSS))
- i--
- dAtA[i] = 0x10
- }
- if m.Cache != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Cache))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemoryEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Failcnt != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
- i--
- dAtA[i] = 0x20
- }
- if m.Max != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
- i--
- dAtA[i] = 0x18
- }
- if m.Usage != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
- i--
- dAtA[i] = 0x10
- }
- if m.Limit != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.OomKill != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill))
- i--
- dAtA[i] = 0x18
- }
- if m.UnderOom != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom))
- i--
- dAtA[i] = 0x10
- }
- if m.OomKillDisable != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *BlkIOStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.SectorsRecursive) > 0 {
- for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- }
- if len(m.IoTimeRecursive) > 0 {
- for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.IoMergedRecursive) > 0 {
- for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if len(m.IoWaitTimeRecursive) > 0 {
- for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- }
- if len(m.IoServiceTimeRecursive) > 0 {
- for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.IoQueuedRecursive) > 0 {
- for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.IoServicedRecursive) > 0 {
- for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.IoServiceBytesRecursive) > 0 {
- for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Value != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Value))
- i--
- dAtA[i] = 0x28
- }
- if m.Minor != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Minor))
- i--
- dAtA[i] = 0x20
- }
- if m.Major != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Major))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Device) > 0 {
- i -= len(m.Device)
- copy(dAtA[i:], m.Device)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Op) > 0 {
- i -= len(m.Op)
- copy(dAtA[i:], m.Op)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RdmaStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Limit) > 0 {
- for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Current) > 0 {
- for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RdmaEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.HcaObjects != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects))
- i--
- dAtA[i] = 0x18
- }
- if m.HcaHandles != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Device) > 0 {
- i -= len(m.Device)
- copy(dAtA[i:], m.Device)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *NetworkStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TxDropped != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped))
- i--
- dAtA[i] = 0x48
- }
- if m.TxErrors != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors))
- i--
- dAtA[i] = 0x40
- }
- if m.TxPackets != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets))
- i--
- dAtA[i] = 0x38
- }
- if m.TxBytes != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes))
- i--
- dAtA[i] = 0x30
- }
- if m.RxDropped != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped))
- i--
- dAtA[i] = 0x28
- }
- if m.RxErrors != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors))
- i--
- dAtA[i] = 0x20
- }
- if m.RxPackets != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets))
- i--
- dAtA[i] = 0x18
- }
- if m.RxBytes != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CgroupStats) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.NrIoWait != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait))
- i--
- dAtA[i] = 0x28
- }
- if m.NrUninterruptible != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible))
- i--
- dAtA[i] = 0x20
- }
- if m.NrStopped != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped))
- i--
- dAtA[i] = 0x18
- }
- if m.NrRunning != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning))
- i--
- dAtA[i] = 0x10
- }
- if m.NrSleeping != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
- offset -= sovMetrics(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Metrics) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Hugetlb) > 0 {
- for _, e := range m.Hugetlb {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Pids != nil {
- l = m.Pids.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.CPU != nil {
- l = m.CPU.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Memory != nil {
- l = m.Memory.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Blkio != nil {
- l = m.Blkio.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Rdma != nil {
- l = m.Rdma.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if len(m.Network) > 0 {
- for _, e := range m.Network {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.CgroupStats != nil {
- l = m.CgroupStats.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.MemoryOomControl != nil {
- l = m.MemoryOomControl.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HugetlbStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Usage != 0 {
- n += 1 + sovMetrics(uint64(m.Usage))
- }
- if m.Max != 0 {
- n += 1 + sovMetrics(uint64(m.Max))
- }
- if m.Failcnt != 0 {
- n += 1 + sovMetrics(uint64(m.Failcnt))
- }
- l = len(m.Pagesize)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *PidsStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Current != 0 {
- n += 1 + sovMetrics(uint64(m.Current))
- }
- if m.Limit != 0 {
- n += 1 + sovMetrics(uint64(m.Limit))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CPUStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Usage != nil {
- l = m.Usage.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Throttling != nil {
- l = m.Throttling.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CPUUsage) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Total != 0 {
- n += 1 + sovMetrics(uint64(m.Total))
- }
- if m.Kernel != 0 {
- n += 1 + sovMetrics(uint64(m.Kernel))
- }
- if m.User != 0 {
- n += 1 + sovMetrics(uint64(m.User))
- }
- if len(m.PerCPU) > 0 {
- l = 0
- for _, e := range m.PerCPU {
- l += sovMetrics(uint64(e))
- }
- n += 1 + sovMetrics(uint64(l)) + l
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Throttle) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Periods != 0 {
- n += 1 + sovMetrics(uint64(m.Periods))
- }
- if m.ThrottledPeriods != 0 {
- n += 1 + sovMetrics(uint64(m.ThrottledPeriods))
- }
- if m.ThrottledTime != 0 {
- n += 1 + sovMetrics(uint64(m.ThrottledTime))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemoryStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Cache != 0 {
- n += 1 + sovMetrics(uint64(m.Cache))
- }
- if m.RSS != 0 {
- n += 1 + sovMetrics(uint64(m.RSS))
- }
- if m.RSSHuge != 0 {
- n += 1 + sovMetrics(uint64(m.RSSHuge))
- }
- if m.MappedFile != 0 {
- n += 1 + sovMetrics(uint64(m.MappedFile))
- }
- if m.Dirty != 0 {
- n += 1 + sovMetrics(uint64(m.Dirty))
- }
- if m.Writeback != 0 {
- n += 1 + sovMetrics(uint64(m.Writeback))
- }
- if m.PgPgIn != 0 {
- n += 1 + sovMetrics(uint64(m.PgPgIn))
- }
- if m.PgPgOut != 0 {
- n += 1 + sovMetrics(uint64(m.PgPgOut))
- }
- if m.PgFault != 0 {
- n += 1 + sovMetrics(uint64(m.PgFault))
- }
- if m.PgMajFault != 0 {
- n += 1 + sovMetrics(uint64(m.PgMajFault))
- }
- if m.InactiveAnon != 0 {
- n += 1 + sovMetrics(uint64(m.InactiveAnon))
- }
- if m.ActiveAnon != 0 {
- n += 1 + sovMetrics(uint64(m.ActiveAnon))
- }
- if m.InactiveFile != 0 {
- n += 1 + sovMetrics(uint64(m.InactiveFile))
- }
- if m.ActiveFile != 0 {
- n += 1 + sovMetrics(uint64(m.ActiveFile))
- }
- if m.Unevictable != 0 {
- n += 1 + sovMetrics(uint64(m.Unevictable))
- }
- if m.HierarchicalMemoryLimit != 0 {
- n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit))
- }
- if m.HierarchicalSwapLimit != 0 {
- n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit))
- }
- if m.TotalCache != 0 {
- n += 2 + sovMetrics(uint64(m.TotalCache))
- }
- if m.TotalRSS != 0 {
- n += 2 + sovMetrics(uint64(m.TotalRSS))
- }
- if m.TotalRSSHuge != 0 {
- n += 2 + sovMetrics(uint64(m.TotalRSSHuge))
- }
- if m.TotalMappedFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalMappedFile))
- }
- if m.TotalDirty != 0 {
- n += 2 + sovMetrics(uint64(m.TotalDirty))
- }
- if m.TotalWriteback != 0 {
- n += 2 + sovMetrics(uint64(m.TotalWriteback))
- }
- if m.TotalPgPgIn != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgPgIn))
- }
- if m.TotalPgPgOut != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgPgOut))
- }
- if m.TotalPgFault != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgFault))
- }
- if m.TotalPgMajFault != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgMajFault))
- }
- if m.TotalInactiveAnon != 0 {
- n += 2 + sovMetrics(uint64(m.TotalInactiveAnon))
- }
- if m.TotalActiveAnon != 0 {
- n += 2 + sovMetrics(uint64(m.TotalActiveAnon))
- }
- if m.TotalInactiveFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalInactiveFile))
- }
- if m.TotalActiveFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalActiveFile))
- }
- if m.TotalUnevictable != 0 {
- n += 2 + sovMetrics(uint64(m.TotalUnevictable))
- }
- if m.Usage != nil {
- l = m.Usage.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.Swap != nil {
- l = m.Swap.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.Kernel != nil {
- l = m.Kernel.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.KernelTCP != nil {
- l = m.KernelTCP.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemoryEntry) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Limit != 0 {
- n += 1 + sovMetrics(uint64(m.Limit))
- }
- if m.Usage != 0 {
- n += 1 + sovMetrics(uint64(m.Usage))
- }
- if m.Max != 0 {
- n += 1 + sovMetrics(uint64(m.Max))
- }
- if m.Failcnt != 0 {
- n += 1 + sovMetrics(uint64(m.Failcnt))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemoryOomControl) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.OomKillDisable != 0 {
- n += 1 + sovMetrics(uint64(m.OomKillDisable))
- }
- if m.UnderOom != 0 {
- n += 1 + sovMetrics(uint64(m.UnderOom))
- }
- if m.OomKill != 0 {
- n += 1 + sovMetrics(uint64(m.OomKill))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *BlkIOStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.IoServiceBytesRecursive) > 0 {
- for _, e := range m.IoServiceBytesRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoServicedRecursive) > 0 {
- for _, e := range m.IoServicedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoQueuedRecursive) > 0 {
- for _, e := range m.IoQueuedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoServiceTimeRecursive) > 0 {
- for _, e := range m.IoServiceTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoWaitTimeRecursive) > 0 {
- for _, e := range m.IoWaitTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoMergedRecursive) > 0 {
- for _, e := range m.IoMergedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoTimeRecursive) > 0 {
- for _, e := range m.IoTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.SectorsRecursive) > 0 {
- for _, e := range m.SectorsRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *BlkIOEntry) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Op)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Device)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Major != 0 {
- n += 1 + sovMetrics(uint64(m.Major))
- }
- if m.Minor != 0 {
- n += 1 + sovMetrics(uint64(m.Minor))
- }
- if m.Value != 0 {
- n += 1 + sovMetrics(uint64(m.Value))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RdmaStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Current) > 0 {
- for _, e := range m.Current {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Limit) > 0 {
- for _, e := range m.Limit {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RdmaEntry) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Device)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.HcaHandles != 0 {
- n += 1 + sovMetrics(uint64(m.HcaHandles))
- }
- if m.HcaObjects != 0 {
- n += 1 + sovMetrics(uint64(m.HcaObjects))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *NetworkStat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.RxBytes != 0 {
- n += 1 + sovMetrics(uint64(m.RxBytes))
- }
- if m.RxPackets != 0 {
- n += 1 + sovMetrics(uint64(m.RxPackets))
- }
- if m.RxErrors != 0 {
- n += 1 + sovMetrics(uint64(m.RxErrors))
- }
- if m.RxDropped != 0 {
- n += 1 + sovMetrics(uint64(m.RxDropped))
- }
- if m.TxBytes != 0 {
- n += 1 + sovMetrics(uint64(m.TxBytes))
- }
- if m.TxPackets != 0 {
- n += 1 + sovMetrics(uint64(m.TxPackets))
- }
- if m.TxErrors != 0 {
- n += 1 + sovMetrics(uint64(m.TxErrors))
- }
- if m.TxDropped != 0 {
- n += 1 + sovMetrics(uint64(m.TxDropped))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CgroupStats) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.NrSleeping != 0 {
- n += 1 + sovMetrics(uint64(m.NrSleeping))
- }
- if m.NrRunning != 0 {
- n += 1 + sovMetrics(uint64(m.NrRunning))
- }
- if m.NrStopped != 0 {
- n += 1 + sovMetrics(uint64(m.NrStopped))
- }
- if m.NrUninterruptible != 0 {
- n += 1 + sovMetrics(uint64(m.NrUninterruptible))
- }
- if m.NrIoWait != 0 {
- n += 1 + sovMetrics(uint64(m.NrIoWait))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovMetrics(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMetrics(x uint64) (n int) {
- return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *Metrics) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForHugetlb := "[]*HugetlbStat{"
- for _, f := range this.Hugetlb {
- repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + ","
- }
- repeatedStringForHugetlb += "}"
- repeatedStringForNetwork := "[]*NetworkStat{"
- for _, f := range this.Network {
- repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + ","
- }
- repeatedStringForNetwork += "}"
- s := strings.Join([]string{`&Metrics{`,
- `Hugetlb:` + repeatedStringForHugetlb + `,`,
- `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`,
- `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`,
- `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`,
- `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`,
- `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`,
- `Network:` + repeatedStringForNetwork + `,`,
- `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`,
- `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *HugetlbStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&HugetlbStat{`,
- `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
- `Max:` + fmt.Sprintf("%v", this.Max) + `,`,
- `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
- `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PidsStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PidsStat{`,
- `Current:` + fmt.Sprintf("%v", this.Current) + `,`,
- `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CPUStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CPUStat{`,
- `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`,
- `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CPUUsage) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CPUUsage{`,
- `Total:` + fmt.Sprintf("%v", this.Total) + `,`,
- `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`,
- `User:` + fmt.Sprintf("%v", this.User) + `,`,
- `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Throttle) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Throttle{`,
- `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`,
- `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`,
- `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MemoryStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MemoryStat{`,
- `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`,
- `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`,
- `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`,
- `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`,
- `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`,
- `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`,
- `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`,
- `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`,
- `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`,
- `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`,
- `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`,
- `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`,
- `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`,
- `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`,
- `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`,
- `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`,
- `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`,
- `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`,
- `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`,
- `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`,
- `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`,
- `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`,
- `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`,
- `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`,
- `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`,
- `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`,
- `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`,
- `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`,
- `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`,
- `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`,
- `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`,
- `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`,
- `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MemoryEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MemoryEntry{`,
- `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
- `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
- `Max:` + fmt.Sprintf("%v", this.Max) + `,`,
- `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MemoryOomControl) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MemoryOomControl{`,
- `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`,
- `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`,
- `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *BlkIOStat) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoServiceBytesRecursive {
- repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoServiceBytesRecursive += "}"
- repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoServicedRecursive {
- repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoServicedRecursive += "}"
- repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoQueuedRecursive {
- repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoQueuedRecursive += "}"
- repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoServiceTimeRecursive {
- repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoServiceTimeRecursive += "}"
- repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoWaitTimeRecursive {
- repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoWaitTimeRecursive += "}"
- repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoMergedRecursive {
- repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoMergedRecursive += "}"
- repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{"
- for _, f := range this.IoTimeRecursive {
- repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForIoTimeRecursive += "}"
- repeatedStringForSectorsRecursive := "[]*BlkIOEntry{"
- for _, f := range this.SectorsRecursive {
- repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + ","
- }
- repeatedStringForSectorsRecursive += "}"
- s := strings.Join([]string{`&BlkIOStat{`,
- `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`,
- `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`,
- `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`,
- `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`,
- `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`,
- `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`,
- `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`,
- `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *BlkIOEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&BlkIOEntry{`,
- `Op:` + fmt.Sprintf("%v", this.Op) + `,`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `Major:` + fmt.Sprintf("%v", this.Major) + `,`,
- `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *RdmaStat) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForCurrent := "[]*RdmaEntry{"
- for _, f := range this.Current {
- repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + ","
- }
- repeatedStringForCurrent += "}"
- repeatedStringForLimit := "[]*RdmaEntry{"
- for _, f := range this.Limit {
- repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + ","
- }
- repeatedStringForLimit += "}"
- s := strings.Join([]string{`&RdmaStat{`,
- `Current:` + repeatedStringForCurrent + `,`,
- `Limit:` + repeatedStringForLimit + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *RdmaEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&RdmaEntry{`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`,
- `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *NetworkStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&NetworkStat{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`,
- `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`,
- `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`,
- `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`,
- `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`,
- `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`,
- `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`,
- `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CgroupStats) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CgroupStats{`,
- `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`,
- `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`,
- `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`,
- `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`,
- `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`,
- `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringMetrics(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *Metrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Hugetlb = append(m.Hugetlb, &HugetlbStat{})
- if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Pids == nil {
- m.Pids = &PidsStat{}
- }
- if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CPU == nil {
- m.CPU = &CPUStat{}
- }
- if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Memory == nil {
- m.Memory = &MemoryStat{}
- }
- if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Blkio == nil {
- m.Blkio = &BlkIOStat{}
- }
- if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Rdma == nil {
- m.Rdma = &RdmaStat{}
- }
- if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Network = append(m.Network, &NetworkStat{})
- if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CgroupStats == nil {
- m.CgroupStats = &CgroupStats{}
- }
- if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.MemoryOomControl == nil {
- m.MemoryOomControl = &MemoryOomControl{}
- }
- if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HugetlbStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- m.Usage = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Usage |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- m.Max = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Max |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
- }
- m.Failcnt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Failcnt |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Pagesize = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PidsStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PidsStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
- }
- m.Current = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Current |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CPUStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CPUStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Usage == nil {
- m.Usage = &CPUUsage{}
- }
- if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Throttling == nil {
- m.Throttling = &Throttle{}
- }
- if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CPUUsage) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
- }
- m.Total = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Total |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
- }
- m.Kernel = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Kernel |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
- }
- m.User = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.User |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PerCPU = append(m.PerCPU, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.PerCPU) == 0 {
- m.PerCPU = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PerCPU = append(m.PerCPU, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Throttle) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Throttle: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType)
- }
- m.Periods = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Periods |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType)
- }
- m.ThrottledPeriods = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ThrottledPeriods |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType)
- }
- m.ThrottledTime = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ThrottledTime |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemoryStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
- }
- m.Cache = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Cache |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType)
- }
- m.RSS = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RSS |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType)
- }
- m.RSSHuge = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RSSHuge |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType)
- }
- m.MappedFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MappedFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType)
- }
- m.Dirty = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Dirty |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType)
- }
- m.Writeback = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Writeback |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType)
- }
- m.PgPgIn = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgPgIn |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType)
- }
- m.PgPgOut = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgPgOut |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType)
- }
- m.PgFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgFault |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType)
- }
- m.PgMajFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgMajFault |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType)
- }
- m.InactiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InactiveAnon |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType)
- }
- m.ActiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ActiveAnon |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType)
- }
- m.InactiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InactiveFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType)
- }
- m.ActiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ActiveFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 15:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType)
- }
- m.Unevictable = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Unevictable |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 16:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType)
- }
- m.HierarchicalMemoryLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 17:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType)
- }
- m.HierarchicalSwapLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 18:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType)
- }
- m.TotalCache = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalCache |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 19:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType)
- }
- m.TotalRSS = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalRSS |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 20:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType)
- }
- m.TotalRSSHuge = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalRSSHuge |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 21:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType)
- }
- m.TotalMappedFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalMappedFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 22:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType)
- }
- m.TotalDirty = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalDirty |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 23:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType)
- }
- m.TotalWriteback = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalWriteback |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 24:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType)
- }
- m.TotalPgPgIn = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgPgIn |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 25:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType)
- }
- m.TotalPgPgOut = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgPgOut |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 26:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType)
- }
- m.TotalPgFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgFault |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 27:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType)
- }
- m.TotalPgMajFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgMajFault |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 28:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType)
- }
- m.TotalInactiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalInactiveAnon |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 29:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType)
- }
- m.TotalActiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalActiveAnon |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 30:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType)
- }
- m.TotalInactiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalInactiveFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 31:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType)
- }
- m.TotalActiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalActiveFile |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 32:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType)
- }
- m.TotalUnevictable = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalUnevictable |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 33:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Usage == nil {
- m.Usage = &MemoryEntry{}
- }
- if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 34:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Swap == nil {
- m.Swap = &MemoryEntry{}
- }
- if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 35:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Kernel == nil {
- m.Kernel = &MemoryEntry{}
- }
- if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 36:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.KernelTCP == nil {
- m.KernelTCP = &MemoryEntry{}
- }
- if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemoryEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- m.Usage = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Usage |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- m.Max = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Max |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
- }
- m.Failcnt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Failcnt |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemoryOomControl) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType)
- }
- m.OomKillDisable = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.OomKillDisable |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType)
- }
- m.UnderOom = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UnderOom |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType)
- }
- m.OomKill = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.OomKill |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BlkIOStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{})
- if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{})
- if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{})
- if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{})
- if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{})
- if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{})
- if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{})
- if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{})
- if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BlkIOEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Op = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType)
- }
- m.Major = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Major |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType)
- }
- m.Minor = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Minor |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- m.Value = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Value |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RdmaStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Current = append(m.Current, &RdmaEntry{})
- if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Limit = append(m.Limit, &RdmaEntry{})
- if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RdmaEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType)
- }
- m.HcaHandles = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HcaHandles |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType)
- }
- m.HcaObjects = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HcaObjects |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *NetworkStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType)
- }
- m.RxBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RxBytes |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType)
- }
- m.RxPackets = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RxPackets |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType)
- }
- m.RxErrors = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RxErrors |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType)
- }
- m.RxDropped = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RxDropped |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType)
- }
- m.TxBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TxBytes |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType)
- }
- m.TxPackets = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TxPackets |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType)
- }
- m.TxErrors = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TxErrors |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType)
- }
- m.TxDropped = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TxDropped |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CgroupStats) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType)
- }
- m.NrSleeping = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NrSleeping |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType)
- }
- m.NrRunning = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NrRunning |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType)
- }
- m.NrStopped = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NrStopped |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType)
- }
- m.NrUninterruptible = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NrUninterruptible |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType)
- }
- m.NrIoWait = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NrIoWait |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMetrics(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMetrics
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
-)
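The file removed above is gogo/protobuf output: every field is decoded by a hand-rolled base-128 varint loop that shifts in seven payload bits per byte (`b&0x7F`) until a byte with the high bit clear terminates the value. A minimal standalone sketch of that primitive, for reference only and not part of the vendored code:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one unsigned protobuf varint (base-128, little-endian
// groups of 7 bits) from data, returning the value and bytes consumed.
// This mirrors the inner loop repeated throughout the deleted decoders.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for shift, i := uint(0), 0; i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                // high bit clear: last byte
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("truncated varint")
}

func main() {
	// 300 encodes as 0xAC 0x02 on the wire.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}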
diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/v3/LICENSE
similarity index 100%
rename from vendor/github.com/containerd/cgroups/LICENSE
rename to vendor/github.com/containerd/cgroups/v3/LICENSE
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go
new file mode 100644
index 000000000..e51e12f80
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go
@@ -0,0 +1,17 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package stats
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go
new file mode 100644
index 000000000..75206889b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go
@@ -0,0 +1,1959 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: github.com/containerd/cgroups/cgroup1/stats/metrics.proto
+
+package stats
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Metrics struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"`
+ Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"`
+ CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"`
+ Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"`
+ Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"`
+ Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"`
+ Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"`
+ CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"`
+ MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"`
+}
+
+func (x *Metrics) Reset() {
+ *x = Metrics{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metrics) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metrics) ProtoMessage() {}
+
+func (x *Metrics) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metrics.ProtoReflect.Descriptor instead.
+func (*Metrics) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Metrics) GetHugetlb() []*HugetlbStat {
+ if x != nil {
+ return x.Hugetlb
+ }
+ return nil
+}
+
+func (x *Metrics) GetPids() *PidsStat {
+ if x != nil {
+ return x.Pids
+ }
+ return nil
+}
+
+func (x *Metrics) GetCPU() *CPUStat {
+ if x != nil {
+ return x.CPU
+ }
+ return nil
+}
+
+func (x *Metrics) GetMemory() *MemoryStat {
+ if x != nil {
+ return x.Memory
+ }
+ return nil
+}
+
+func (x *Metrics) GetBlkio() *BlkIOStat {
+ if x != nil {
+ return x.Blkio
+ }
+ return nil
+}
+
+func (x *Metrics) GetRdma() *RdmaStat {
+ if x != nil {
+ return x.Rdma
+ }
+ return nil
+}
+
+func (x *Metrics) GetNetwork() []*NetworkStat {
+ if x != nil {
+ return x.Network
+ }
+ return nil
+}
+
+func (x *Metrics) GetCgroupStats() *CgroupStats {
+ if x != nil {
+ return x.CgroupStats
+ }
+ return nil
+}
+
+func (x *Metrics) GetMemoryOomControl() *MemoryOomControl {
+ if x != nil {
+ return x.MemoryOomControl
+ }
+ return nil
+}
+
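The replacement file is protoc-gen-go v1.28.1 output: there are no generated Unmarshal/Marshal methods; (de)serialization is delegated to the google.golang.org/protobuf runtime via ProtoReflect. A hedged usage sketch under that assumption (the byte slice is a placeholder, not data from this diff):

package main

import (
	"fmt"
	"log"

	stats "github.com/containerd/cgroups/v3/cgroup1/stats"
	"google.golang.org/protobuf/proto"
)

func main() {
	var m stats.Metrics
	// raw would typically come from a containerd task metrics response;
	// an empty slice decodes to a zero-value message here.
	raw := []byte{}
	if err := proto.Unmarshal(raw, &m); err != nil {
		log.Fatal(err)
	}
	// Generated getters are nil-safe, so chained access is fine.
	if cpu := m.GetCPU(); cpu != nil {
		fmt.Println("total cpu usage:", cpu.GetUsage().GetTotal())
	}
}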
+type HugetlbStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"`
+ Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
+ Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
+ Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"`
+}
+
+func (x *HugetlbStat) Reset() {
+ *x = HugetlbStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HugetlbStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HugetlbStat) ProtoMessage() {}
+
+func (x *HugetlbStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HugetlbStat.ProtoReflect.Descriptor instead.
+func (*HugetlbStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HugetlbStat) GetUsage() uint64 {
+ if x != nil {
+ return x.Usage
+ }
+ return 0
+}
+
+func (x *HugetlbStat) GetMax() uint64 {
+ if x != nil {
+ return x.Max
+ }
+ return 0
+}
+
+func (x *HugetlbStat) GetFailcnt() uint64 {
+ if x != nil {
+ return x.Failcnt
+ }
+ return 0
+}
+
+func (x *HugetlbStat) GetPagesize() string {
+ if x != nil {
+ return x.Pagesize
+ }
+ return ""
+}
+
+type PidsStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"`
+ Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *PidsStat) Reset() {
+ *x = PidsStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PidsStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PidsStat) ProtoMessage() {}
+
+func (x *PidsStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PidsStat.ProtoReflect.Descriptor instead.
+func (*PidsStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *PidsStat) GetCurrent() uint64 {
+ if x != nil {
+ return x.Current
+ }
+ return 0
+}
+
+func (x *PidsStat) GetLimit() uint64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+type CPUStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"`
+ Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"`
+}
+
+func (x *CPUStat) Reset() {
+ *x = CPUStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CPUStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CPUStat) ProtoMessage() {}
+
+func (x *CPUStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CPUStat.ProtoReflect.Descriptor instead.
+func (*CPUStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CPUStat) GetUsage() *CPUUsage {
+ if x != nil {
+ return x.Usage
+ }
+ return nil
+}
+
+func (x *CPUStat) GetThrottling() *Throttle {
+ if x != nil {
+ return x.Throttling
+ }
+ return nil
+}
+
+type CPUUsage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // values in nanoseconds
+ Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+ Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"`
+ User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"`
+ PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"`
+}
+
+func (x *CPUUsage) Reset() {
+ *x = CPUUsage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CPUUsage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CPUUsage) ProtoMessage() {}
+
+func (x *CPUUsage) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CPUUsage.ProtoReflect.Descriptor instead.
+func (*CPUUsage) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *CPUUsage) GetTotal() uint64 {
+ if x != nil {
+ return x.Total
+ }
+ return 0
+}
+
+func (x *CPUUsage) GetKernel() uint64 {
+ if x != nil {
+ return x.Kernel
+ }
+ return 0
+}
+
+func (x *CPUUsage) GetUser() uint64 {
+ if x != nil {
+ return x.User
+ }
+ return 0
+}
+
+func (x *CPUUsage) GetPerCPU() []uint64 {
+ if x != nil {
+ return x.PerCPU
+ }
+ return nil
+}
+
+type Throttle struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"`
+ ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"`
+}
+
+func (x *Throttle) Reset() {
+ *x = Throttle{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Throttle) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Throttle) ProtoMessage() {}
+
+func (x *Throttle) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Throttle.ProtoReflect.Descriptor instead.
+func (*Throttle) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Throttle) GetPeriods() uint64 {
+ if x != nil {
+ return x.Periods
+ }
+ return 0
+}
+
+func (x *Throttle) GetThrottledPeriods() uint64 {
+ if x != nil {
+ return x.ThrottledPeriods
+ }
+ return 0
+}
+
+func (x *Throttle) GetThrottledTime() uint64 {
+ if x != nil {
+ return x.ThrottledTime
+ }
+ return 0
+}
+
+type MemoryStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"`
+ RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"`
+ RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"`
+ MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"`
+ Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"`
+ Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"`
+ PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"`
+ PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"`
+ PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"`
+ PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"`
+ InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
+ ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
+ InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
+ ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
+ Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
+ HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"`
+ HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"`
+ TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"`
+ TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"`
+ TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"`
+ TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"`
+ TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"`
+ TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"`
+ TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"`
+ TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"`
+ TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"`
+ TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"`
+ TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"`
+ TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"`
+ TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"`
+ TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"`
+ TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"`
+ Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"`
+ Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"`
+ Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"`
+ KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"`
+}
+
+func (x *MemoryStat) Reset() {
+ *x = MemoryStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryStat) ProtoMessage() {}
+
+func (x *MemoryStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryStat.ProtoReflect.Descriptor instead.
+func (*MemoryStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MemoryStat) GetCache() uint64 {
+ if x != nil {
+ return x.Cache
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetRSS() uint64 {
+ if x != nil {
+ return x.RSS
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetRSSHuge() uint64 {
+ if x != nil {
+ return x.RSSHuge
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetMappedFile() uint64 {
+ if x != nil {
+ return x.MappedFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetDirty() uint64 {
+ if x != nil {
+ return x.Dirty
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetWriteback() uint64 {
+ if x != nil {
+ return x.Writeback
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetPgPgIn() uint64 {
+ if x != nil {
+ return x.PgPgIn
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetPgPgOut() uint64 {
+ if x != nil {
+ return x.PgPgOut
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetPgFault() uint64 {
+ if x != nil {
+ return x.PgFault
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetPgMajFault() uint64 {
+ if x != nil {
+ return x.PgMajFault
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetInactiveAnon() uint64 {
+ if x != nil {
+ return x.InactiveAnon
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetActiveAnon() uint64 {
+ if x != nil {
+ return x.ActiveAnon
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetInactiveFile() uint64 {
+ if x != nil {
+ return x.InactiveFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetActiveFile() uint64 {
+ if x != nil {
+ return x.ActiveFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetUnevictable() uint64 {
+ if x != nil {
+ return x.Unevictable
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetHierarchicalMemoryLimit() uint64 {
+ if x != nil {
+ return x.HierarchicalMemoryLimit
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetHierarchicalSwapLimit() uint64 {
+ if x != nil {
+ return x.HierarchicalSwapLimit
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalCache() uint64 {
+ if x != nil {
+ return x.TotalCache
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalRSS() uint64 {
+ if x != nil {
+ return x.TotalRSS
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalRSSHuge() uint64 {
+ if x != nil {
+ return x.TotalRSSHuge
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalMappedFile() uint64 {
+ if x != nil {
+ return x.TotalMappedFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalDirty() uint64 {
+ if x != nil {
+ return x.TotalDirty
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalWriteback() uint64 {
+ if x != nil {
+ return x.TotalWriteback
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalPgPgIn() uint64 {
+ if x != nil {
+ return x.TotalPgPgIn
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalPgPgOut() uint64 {
+ if x != nil {
+ return x.TotalPgPgOut
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalPgFault() uint64 {
+ if x != nil {
+ return x.TotalPgFault
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalPgMajFault() uint64 {
+ if x != nil {
+ return x.TotalPgMajFault
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalInactiveAnon() uint64 {
+ if x != nil {
+ return x.TotalInactiveAnon
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalActiveAnon() uint64 {
+ if x != nil {
+ return x.TotalActiveAnon
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalInactiveFile() uint64 {
+ if x != nil {
+ return x.TotalInactiveFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalActiveFile() uint64 {
+ if x != nil {
+ return x.TotalActiveFile
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetTotalUnevictable() uint64 {
+ if x != nil {
+ return x.TotalUnevictable
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetUsage() *MemoryEntry {
+ if x != nil {
+ return x.Usage
+ }
+ return nil
+}
+
+func (x *MemoryStat) GetSwap() *MemoryEntry {
+ if x != nil {
+ return x.Swap
+ }
+ return nil
+}
+
+func (x *MemoryStat) GetKernel() *MemoryEntry {
+ if x != nil {
+ return x.Kernel
+ }
+ return nil
+}
+
+func (x *MemoryStat) GetKernelTCP() *MemoryEntry {
+ if x != nil {
+ return x.KernelTCP
+ }
+ return nil
+}
+
+type MemoryEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
+ Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"`
+ Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"`
+ Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
+}
+
+func (x *MemoryEntry) Reset() {
+ *x = MemoryEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryEntry) ProtoMessage() {}
+
+func (x *MemoryEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryEntry.ProtoReflect.Descriptor instead.
+func (*MemoryEntry) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *MemoryEntry) GetLimit() uint64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *MemoryEntry) GetUsage() uint64 {
+ if x != nil {
+ return x.Usage
+ }
+ return 0
+}
+
+func (x *MemoryEntry) GetMax() uint64 {
+ if x != nil {
+ return x.Max
+ }
+ return 0
+}
+
+func (x *MemoryEntry) GetFailcnt() uint64 {
+ if x != nil {
+ return x.Failcnt
+ }
+ return 0
+}
+
+type MemoryOomControl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"`
+ UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"`
+ OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"`
+}
+
+func (x *MemoryOomControl) Reset() {
+ *x = MemoryOomControl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryOomControl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryOomControl) ProtoMessage() {}
+
+func (x *MemoryOomControl) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryOomControl.ProtoReflect.Descriptor instead.
+func (*MemoryOomControl) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *MemoryOomControl) GetOomKillDisable() uint64 {
+ if x != nil {
+ return x.OomKillDisable
+ }
+ return 0
+}
+
+func (x *MemoryOomControl) GetUnderOom() uint64 {
+ if x != nil {
+ return x.UnderOom
+ }
+ return 0
+}
+
+func (x *MemoryOomControl) GetOomKill() uint64 {
+ if x != nil {
+ return x.OomKill
+ }
+ return 0
+}
+
+type BlkIOStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"`
+ IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"`
+ SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"`
+}
+
+func (x *BlkIOStat) Reset() {
+ *x = BlkIOStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlkIOStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlkIOStat) ProtoMessage() {}
+
+func (x *BlkIOStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlkIOStat.ProtoReflect.Descriptor instead.
+func (*BlkIOStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *BlkIOStat) GetIoServiceBytesRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoServiceBytesRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoServicedRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoServicedRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoQueuedRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoQueuedRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoServiceTimeRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoServiceTimeRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoWaitTimeRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoWaitTimeRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoMergedRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoMergedRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetIoTimeRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.IoTimeRecursive
+ }
+ return nil
+}
+
+func (x *BlkIOStat) GetSectorsRecursive() []*BlkIOEntry {
+ if x != nil {
+ return x.SectorsRecursive
+ }
+ return nil
+}
+
+type BlkIOEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"`
+ Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"`
+ Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"`
+ Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"`
+ Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *BlkIOEntry) Reset() {
+ *x = BlkIOEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlkIOEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlkIOEntry) ProtoMessage() {}
+
+func (x *BlkIOEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlkIOEntry.ProtoReflect.Descriptor instead.
+func (*BlkIOEntry) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *BlkIOEntry) GetOp() string {
+ if x != nil {
+ return x.Op
+ }
+ return ""
+}
+
+func (x *BlkIOEntry) GetDevice() string {
+ if x != nil {
+ return x.Device
+ }
+ return ""
+}
+
+func (x *BlkIOEntry) GetMajor() uint64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *BlkIOEntry) GetMinor() uint64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+func (x *BlkIOEntry) GetValue() uint64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type RdmaStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"`
+ Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *RdmaStat) Reset() {
+ *x = RdmaStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RdmaStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RdmaStat) ProtoMessage() {}
+
+func (x *RdmaStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RdmaStat.ProtoReflect.Descriptor instead.
+func (*RdmaStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *RdmaStat) GetCurrent() []*RdmaEntry {
+ if x != nil {
+ return x.Current
+ }
+ return nil
+}
+
+func (x *RdmaStat) GetLimit() []*RdmaEntry {
+ if x != nil {
+ return x.Limit
+ }
+ return nil
+}
+
+type RdmaEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+ HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"`
+ HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"`
+}
+
+func (x *RdmaEntry) Reset() {
+ *x = RdmaEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RdmaEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RdmaEntry) ProtoMessage() {}
+
+func (x *RdmaEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RdmaEntry.ProtoReflect.Descriptor instead.
+func (*RdmaEntry) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *RdmaEntry) GetDevice() string {
+ if x != nil {
+ return x.Device
+ }
+ return ""
+}
+
+func (x *RdmaEntry) GetHcaHandles() uint32 {
+ if x != nil {
+ return x.HcaHandles
+ }
+ return 0
+}
+
+func (x *RdmaEntry) GetHcaObjects() uint32 {
+ if x != nil {
+ return x.HcaObjects
+ }
+ return 0
+}
+
+type NetworkStat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+ RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+ RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+ RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
+ TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+ TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+ TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+ TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
+}
+
+func (x *NetworkStat) Reset() {
+ *x = NetworkStat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetworkStat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkStat) ProtoMessage() {}
+
+func (x *NetworkStat) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkStat.ProtoReflect.Descriptor instead.
+func (*NetworkStat) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *NetworkStat) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NetworkStat) GetRxBytes() uint64 {
+ if x != nil {
+ return x.RxBytes
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetRxPackets() uint64 {
+ if x != nil {
+ return x.RxPackets
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetRxErrors() uint64 {
+ if x != nil {
+ return x.RxErrors
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetRxDropped() uint64 {
+ if x != nil {
+ return x.RxDropped
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetTxBytes() uint64 {
+ if x != nil {
+ return x.TxBytes
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetTxPackets() uint64 {
+ if x != nil {
+ return x.TxPackets
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetTxErrors() uint64 {
+ if x != nil {
+ return x.TxErrors
+ }
+ return 0
+}
+
+func (x *NetworkStat) GetTxDropped() uint64 {
+ if x != nil {
+ return x.TxDropped
+ }
+ return 0
+}
+
+// CgroupStats exports per-cgroup statistics.
+type CgroupStats struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // number of tasks sleeping
+ NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"`
+ // number of tasks running
+ NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"`
+ // number of tasks in stopped state
+ NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"`
+ // number of tasks in uninterruptible state
+ NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"`
+ // number of tasks waiting on IO
+ NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"`
+}
+
+func (x *CgroupStats) Reset() {
+ *x = CgroupStats{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CgroupStats) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CgroupStats) ProtoMessage() {}
+
+func (x *CgroupStats) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CgroupStats.ProtoReflect.Descriptor instead.
+func (*CgroupStats) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *CgroupStats) GetNrSleeping() uint64 {
+ if x != nil {
+ return x.NrSleeping
+ }
+ return 0
+}
+
+func (x *CgroupStats) GetNrRunning() uint64 {
+ if x != nil {
+ return x.NrRunning
+ }
+ return 0
+}
+
+func (x *CgroupStats) GetNrStopped() uint64 {
+ if x != nil {
+ return x.NrStopped
+ }
+ return 0
+}
+
+func (x *CgroupStats) GetNrUninterruptible() uint64 {
+ if x != nil {
+ return x.NrUninterruptible
+ }
+ return 0
+}
+
+func (x *CgroupStats) GetNrIoWait() uint64 {
+ if x != nil {
+ return x.NrIoWait
+ }
+ return 0
+}
+
+var File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto protoreflect.FileDescriptor
+
+var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x69, 0x6f, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x73, 0x2e, 0x76, 0x31, 0x22, 0xcd, 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x12, 0x3f, 0x0a, 0x07, 0x68, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x75,
+ 0x67, 0x65, 0x74, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x68, 0x75, 0x67, 0x65, 0x74,
+ 0x6c, 0x62, 0x12, 0x36, 0x0a, 0x04, 0x70, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x64, 0x73,
+ 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x12, 0x33, 0x0a, 0x03, 0x63, 0x70,
+ 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12,
+ 0x3c, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72,
+ 0x79, 0x53, 0x74, 0x61, 0x74, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x39, 0x0a,
+ 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61,
+ 0x74, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x12, 0x36, 0x0a, 0x04, 0x72, 0x64, 0x6d, 0x61,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x72, 0x64, 0x6d, 0x61,
+ 0x12, 0x3f, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x12, 0x6d,
+ 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6f, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x6b, 0x0a, 0x0b, 0x48, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62,
+ 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61,
+ 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07,
+ 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66,
+ 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69,
+ 0x7a, 0x65, 0x22, 0x3a, 0x0a, 0x08, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x12, 0x18,
+ 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x87,
+ 0x01, 0x0a, 0x07, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x75, 0x73,
+ 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x75,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69,
+ 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x52, 0x0a, 0x74, 0x68,
+ 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x65, 0x0a, 0x08, 0x43, 0x50, 0x55, 0x55,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65,
+ 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e,
+ 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70,
+ 0x75, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x70, 0x65, 0x72, 0x43, 0x70, 0x75, 0x22,
+ 0x78, 0x0a, 0x08, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x65,
+ 0x72, 0x69, 0x6f, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c,
+ 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x50, 0x65, 0x72, 0x69, 0x6f,
+ 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f,
+ 0x74, 0x74, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x94, 0x0b, 0x0a, 0x0a, 0x4d, 0x65,
+ 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x61, 0x63, 0x68,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x61, 0x63, 0x68, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x73, 0x73,
+ 0x12, 0x19, 0x0a, 0x08, 0x72, 0x73, 0x73, 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x07, 0x72, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d,
+ 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0a, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x64, 0x69, 0x72, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x64, 0x69, 0x72,
+ 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b,
+ 0x12, 0x18, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x06, 0x70, 0x67, 0x50, 0x67, 0x49, 0x6e, 0x12, 0x1a, 0x0a, 0x09, 0x70, 0x67,
+ 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70,
+ 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x46, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x70, 0x67, 0x5f, 0x6d, 0x61, 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12,
+ 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61,
+ 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63,
+ 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, 0x0a,
+ 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x77,
+ 0x61, 0x70, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15,
+ 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x77, 0x61, 0x70,
+ 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63,
+ 0x61, 0x63, 0x68, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f,
+ 0x72, 0x73, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x52, 0x73, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x73, 0x73,
+ 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x52, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x70, 0x65,
+ 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64,
+ 0x69, 0x72, 0x74, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x44, 0x69, 0x72, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x12,
+ 0x23, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69,
+ 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67,
+ 0x50, 0x67, 0x49, 0x6e, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67,
+ 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1a, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x46, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x6d, 0x61,
+ 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2e,
+ 0x0a, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x49, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2a,
+ 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61,
+ 0x6e, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c,
+ 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x6e,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
+ 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f,
+ 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65,
+ 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x39, 0x0a, 0x04, 0x73, 0x77, 0x61, 0x70, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x73, 0x77, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x06, 0x6b,
+ 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x44, 0x0a, 0x0a, 0x6b, 0x65,
+ 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x63, 0x70,
+ 0x22, 0x65, 0x0a, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
+ 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d,
+ 0x61, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a,
+ 0x07, 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
+ 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x22, 0x74, 0x0a, 0x10, 0x4d, 0x65, 0x6d, 0x6f, 0x72,
+ 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x6f,
+ 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x44, 0x69,
+ 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x6f,
+ 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x4f,
+ 0x6f, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x22, 0xd5, 0x05,
+ 0x0a, 0x09, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x12, 0x61, 0x0a, 0x1a, 0x69,
+ 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x58,
+ 0x0a, 0x15, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x5f, 0x72, 0x65,
+ 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x52,
+ 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f, 0x5f, 0x71,
+ 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, 0x6f, 0x51,
+ 0x75, 0x65, 0x75, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x5f,
+ 0x0a, 0x19, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b,
+ 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12,
+ 0x59, 0x0a, 0x16, 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f,
+ 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76,
+ 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69,
+ 0x6f, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65,
+ 0x12, 0x50, 0x0a, 0x11, 0x69, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75,
+ 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0f, 0x69, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x12, 0x51, 0x0a, 0x11, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x72, 0x65,
+ 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x63, 0x75,
+ 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x76, 0x0a, 0x0a, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x6f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d,
+ 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
+ 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x84, 0x01,
+ 0x0a, 0x08, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x22, 0x65, 0x0a, 0x09, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61,
+ 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a,
+ 0x68, 0x63, 0x61, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63,
+ 0x61, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x0a, 0x68, 0x63, 0x61, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x0b,
+ 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x19, 0x0a, 0x08, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x07, 0x72, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x72, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x78, 0x5f,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x78,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78, 0x5f, 0x64, 0x72, 0x6f,
+ 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x72, 0x78, 0x44, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12,
+ 0x1b, 0x0a, 0x09, 0x74, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x74, 0x78, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x74, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x74, 0x78, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x0b,
+ 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6e,
+ 0x72, 0x5f, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0a, 0x6e, 0x72, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x6e, 0x72, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x6e, 0x72, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6e,
+ 0x72, 0x5f, 0x73, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x09, 0x6e, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x6e, 0x72,
+ 0x5f, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x6e, 0x72, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6e, 0x72, 0x5f,
+ 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e,
+ 0x72, 0x49, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce sync.Once
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc
+)
+
+func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP() []byte {
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce.Do(func() {
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData)
+ })
+ return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData
+}
+
+var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = []interface{}{
+ (*Metrics)(nil), // 0: io.containerd.cgroups.v1.Metrics
+ (*HugetlbStat)(nil), // 1: io.containerd.cgroups.v1.HugetlbStat
+ (*PidsStat)(nil), // 2: io.containerd.cgroups.v1.PidsStat
+ (*CPUStat)(nil), // 3: io.containerd.cgroups.v1.CPUStat
+ (*CPUUsage)(nil), // 4: io.containerd.cgroups.v1.CPUUsage
+ (*Throttle)(nil), // 5: io.containerd.cgroups.v1.Throttle
+ (*MemoryStat)(nil), // 6: io.containerd.cgroups.v1.MemoryStat
+ (*MemoryEntry)(nil), // 7: io.containerd.cgroups.v1.MemoryEntry
+ (*MemoryOomControl)(nil), // 8: io.containerd.cgroups.v1.MemoryOomControl
+ (*BlkIOStat)(nil), // 9: io.containerd.cgroups.v1.BlkIOStat
+ (*BlkIOEntry)(nil), // 10: io.containerd.cgroups.v1.BlkIOEntry
+ (*RdmaStat)(nil), // 11: io.containerd.cgroups.v1.RdmaStat
+ (*RdmaEntry)(nil), // 12: io.containerd.cgroups.v1.RdmaEntry
+ (*NetworkStat)(nil), // 13: io.containerd.cgroups.v1.NetworkStat
+ (*CgroupStats)(nil), // 14: io.containerd.cgroups.v1.CgroupStats
+}
+var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = []int32{
+ 1, // 0: io.containerd.cgroups.v1.Metrics.hugetlb:type_name -> io.containerd.cgroups.v1.HugetlbStat
+ 2, // 1: io.containerd.cgroups.v1.Metrics.pids:type_name -> io.containerd.cgroups.v1.PidsStat
+ 3, // 2: io.containerd.cgroups.v1.Metrics.cpu:type_name -> io.containerd.cgroups.v1.CPUStat
+ 6, // 3: io.containerd.cgroups.v1.Metrics.memory:type_name -> io.containerd.cgroups.v1.MemoryStat
+ 9, // 4: io.containerd.cgroups.v1.Metrics.blkio:type_name -> io.containerd.cgroups.v1.BlkIOStat
+ 11, // 5: io.containerd.cgroups.v1.Metrics.rdma:type_name -> io.containerd.cgroups.v1.RdmaStat
+ 13, // 6: io.containerd.cgroups.v1.Metrics.network:type_name -> io.containerd.cgroups.v1.NetworkStat
+ 14, // 7: io.containerd.cgroups.v1.Metrics.cgroup_stats:type_name -> io.containerd.cgroups.v1.CgroupStats
+ 8, // 8: io.containerd.cgroups.v1.Metrics.memory_oom_control:type_name -> io.containerd.cgroups.v1.MemoryOomControl
+ 4, // 9: io.containerd.cgroups.v1.CPUStat.usage:type_name -> io.containerd.cgroups.v1.CPUUsage
+ 5, // 10: io.containerd.cgroups.v1.CPUStat.throttling:type_name -> io.containerd.cgroups.v1.Throttle
+ 7, // 11: io.containerd.cgroups.v1.MemoryStat.usage:type_name -> io.containerd.cgroups.v1.MemoryEntry
+ 7, // 12: io.containerd.cgroups.v1.MemoryStat.swap:type_name -> io.containerd.cgroups.v1.MemoryEntry
+ 7, // 13: io.containerd.cgroups.v1.MemoryStat.kernel:type_name -> io.containerd.cgroups.v1.MemoryEntry
+ 7, // 14: io.containerd.cgroups.v1.MemoryStat.kernel_tcp:type_name -> io.containerd.cgroups.v1.MemoryEntry
+ 10, // 15: io.containerd.cgroups.v1.BlkIOStat.io_service_bytes_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 16: io.containerd.cgroups.v1.BlkIOStat.io_serviced_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 17: io.containerd.cgroups.v1.BlkIOStat.io_queued_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 18: io.containerd.cgroups.v1.BlkIOStat.io_service_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 19: io.containerd.cgroups.v1.BlkIOStat.io_wait_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 20: io.containerd.cgroups.v1.BlkIOStat.io_merged_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 21: io.containerd.cgroups.v1.BlkIOStat.io_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 10, // 22: io.containerd.cgroups.v1.BlkIOStat.sectors_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry
+ 12, // 23: io.containerd.cgroups.v1.RdmaStat.current:type_name -> io.containerd.cgroups.v1.RdmaEntry
+ 12, // 24: io.containerd.cgroups.v1.RdmaStat.limit:type_name -> io.containerd.cgroups.v1.RdmaEntry
+ 25, // [25:25] is the sub-list for method output_type
+ 25, // [25:25] is the sub-list for method input_type
+ 25, // [25:25] is the sub-list for extension type_name
+ 25, // [25:25] is the sub-list for extension extendee
+ 0, // [0:25] is the sub-list for field type_name
+}
+
+func init() { file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() }
+func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() {
+ if File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metrics); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HugetlbStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PidsStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CPUStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CPUUsage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Throttle); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryOomControl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlkIOStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlkIOEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RdmaStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RdmaEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CgroupStats); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes,
+ DependencyIndexes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs,
+ MessageInfos: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes,
+ }.Build()
+ File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto = out.File
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = nil
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = nil
+ file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt
similarity index 97%
rename from vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt
rename to vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt
index e476cea64..7e4313ea5 100644
--- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt
@@ -1,7 +1,6 @@
file {
- name: "github.com/containerd/cgroups/stats/v1/metrics.proto"
+ name: "github.com/containerd/cgroups/cgroup1/stats/metrics.proto"
package: "io.containerd.cgroups.v1"
- dependency: "gogoproto/gogo.proto"
message_type {
name: "Metrics"
field {
@@ -26,9 +25,6 @@ file {
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.CPUStat"
- options {
- 65004: "CPU"
- }
json_name: "cpu"
}
field {
@@ -175,9 +171,6 @@ file {
number: 4
label: LABEL_REPEATED
type: TYPE_UINT64
- options {
- 65004: "PerCPU"
- }
json_name: "perCpu"
}
}
@@ -219,9 +212,6 @@ file {
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
- options {
- 65004: "RSS"
- }
json_name: "rss"
}
field {
@@ -229,9 +219,6 @@ file {
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
- options {
- 65004: "RSSHuge"
- }
json_name: "rssHuge"
}
field {
@@ -344,9 +331,6 @@ file {
number: 19
label: LABEL_OPTIONAL
type: TYPE_UINT64
- options {
- 65004: "TotalRSS"
- }
json_name: "totalRss"
}
field {
@@ -354,9 +338,6 @@ file {
number: 20
label: LABEL_OPTIONAL
type: TYPE_UINT64
- options {
- 65004: "TotalRSSHuge"
- }
json_name: "totalRssHuge"
}
field {
@@ -473,9 +454,6 @@ file {
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
- options {
- 65004: "KernelTCP"
- }
json_name: "kernelTcp"
}
}
@@ -786,5 +764,8 @@ file {
json_name: "nrIoWait"
}
}
+ options {
+ go_package: "github.com/containerd/cgroups/cgroup1/stats"
+ }
syntax: "proto3"
}
diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
similarity index 87%
rename from vendor/github.com/containerd/cgroups/stats/v1/metrics.proto
rename to vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
index b3f6cc37d..e6e4444b1 100644
--- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
@@ -2,12 +2,12 @@ syntax = "proto3";
package io.containerd.cgroups.v1;
-import "gogoproto/gogo.proto";
+option go_package = "github.com/containerd/cgroups/cgroup1/stats";
message Metrics {
repeated HugetlbStat hugetlb = 1;
PidsStat pids = 2;
- CPUStat cpu = 3 [(gogoproto.customname) = "CPU"];
+ CPUStat cpu = 3;
MemoryStat memory = 4;
BlkIOStat blkio = 5;
RdmaStat rdma = 6;
@@ -38,7 +38,7 @@ message CPUUsage {
uint64 total = 1;
uint64 kernel = 2;
uint64 user = 3;
- repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"];
+ repeated uint64 per_cpu = 4;
}
@@ -50,8 +50,8 @@ message Throttle {
message MemoryStat {
uint64 cache = 1;
- uint64 rss = 2 [(gogoproto.customname) = "RSS"];
- uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"];
+ uint64 rss = 2;
+ uint64 rss_huge = 3;
uint64 mapped_file = 4;
uint64 dirty = 5;
uint64 writeback = 6;
@@ -67,8 +67,8 @@ message MemoryStat {
uint64 hierarchical_memory_limit = 16;
uint64 hierarchical_swap_limit = 17;
uint64 total_cache = 18;
- uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"];
- uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"];
+ uint64 total_rss = 19;
+ uint64 total_rss_huge = 20;
uint64 total_mapped_file = 21;
uint64 total_dirty = 22;
uint64 total_writeback = 23;
@@ -84,7 +84,7 @@ message MemoryStat {
MemoryEntry usage = 33;
MemoryEntry swap = 34;
MemoryEntry kernel = 35;
- MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"];
+ MemoryEntry kernel_tcp = 36;
}
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE
deleted file mode 100644
index 8915f0277..000000000
--- a/vendor/github.com/containerd/containerd/NOTICE
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
deleted file mode 100644
index 05a35228c..000000000
--- a/vendor/github.com/containerd/containerd/errdefs/errors.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-// Package errdefs defines the common errors used throughout containerd
-// packages.
-//
-// Use with errors.Wrap and error.Wrapf to add context to an error.
-//
-// To detect an error class, use the IsXXX functions to tell whether an error
-// is of a certain type.
-//
-// The functions ToGRPC and FromGRPC can be used to map server-side and
-// client-side errors to the correct types.
-package errdefs
-
-import (
- "context"
-
- "github.com/pkg/errors"
-)
-
-// Definitions of common error types used throughout containerd. All containerd
-// errors returned by most packages will map into one of these errors classes.
-// Packages should return errors of these types when they want to instruct a
-// client to take a particular action.
-//
-// For the most part, we just try to provide local grpc errors. Most conditions
-// map very well to those defined by grpc.
-var (
- ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
- ErrInvalidArgument = errors.New("invalid argument")
- ErrNotFound = errors.New("not found")
- ErrAlreadyExists = errors.New("already exists")
- ErrFailedPrecondition = errors.New("failed precondition")
- ErrUnavailable = errors.New("unavailable")
- ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
-)
-
-// IsInvalidArgument returns true if the error is due to an invalid argument
-func IsInvalidArgument(err error) bool {
- return errors.Is(err, ErrInvalidArgument)
-}
-
-// IsNotFound returns true if the error is due to a missing object
-func IsNotFound(err error) bool {
- return errors.Is(err, ErrNotFound)
-}
-
-// IsAlreadyExists returns true if the error is due to an already existing
-// metadata item
-func IsAlreadyExists(err error) bool {
- return errors.Is(err, ErrAlreadyExists)
-}
-
-// IsFailedPrecondition returns true if an operation could not proceed to the
-// lack of a particular condition
-func IsFailedPrecondition(err error) bool {
- return errors.Is(err, ErrFailedPrecondition)
-}
-
-// IsUnavailable returns true if the error is due to a resource being unavailable
-func IsUnavailable(err error) bool {
- return errors.Is(err, ErrUnavailable)
-}
-
-// IsNotImplemented returns true if the error is due to not being implemented
-func IsNotImplemented(err error) bool {
- return errors.Is(err, ErrNotImplemented)
-}
-
-// IsCanceled returns true if the error is due to `context.Canceled`.
-func IsCanceled(err error) bool {
- return errors.Is(err, context.Canceled)
-}
-
-// IsDeadlineExceeded returns true if the error is due to
-// `context.DeadlineExceeded`.
-func IsDeadlineExceeded(err error) bool {
- return errors.Is(err, context.DeadlineExceeded)
-}
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
deleted file mode 100644
index 209f63bd0..000000000
--- a/vendor/github.com/containerd/containerd/errdefs/grpc.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package errdefs
-
-import (
- "context"
- "strings"
-
- "github.com/pkg/errors"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// ToGRPC will attempt to map the backend containerd error into a grpc error,
-// using the original error message as a description.
-//
-// Further information may be extracted from certain errors depending on their
-// type.
-//
-// If the error is unmapped, the original error will be returned to be handled
-// by the regular grpc error handling stack.
-func ToGRPC(err error) error {
- if err == nil {
- return nil
- }
-
- if isGRPCError(err) {
- // error has already been mapped to grpc
- return err
- }
-
- switch {
- case IsInvalidArgument(err):
- return status.Errorf(codes.InvalidArgument, err.Error())
- case IsNotFound(err):
- return status.Errorf(codes.NotFound, err.Error())
- case IsAlreadyExists(err):
- return status.Errorf(codes.AlreadyExists, err.Error())
- case IsFailedPrecondition(err):
- return status.Errorf(codes.FailedPrecondition, err.Error())
- case IsUnavailable(err):
- return status.Errorf(codes.Unavailable, err.Error())
- case IsNotImplemented(err):
- return status.Errorf(codes.Unimplemented, err.Error())
- case IsCanceled(err):
- return status.Errorf(codes.Canceled, err.Error())
- case IsDeadlineExceeded(err):
- return status.Errorf(codes.DeadlineExceeded, err.Error())
- }
-
- return err
-}
-
-// ToGRPCf maps the error to grpc error codes, assembling the formatting string
-// and combining it with the target error string.
-//
-// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
-func ToGRPCf(err error, format string, args ...interface{}) error {
- return ToGRPC(errors.Wrapf(err, format, args...))
-}
-
-// FromGRPC returns the underlying error from a grpc service based on the grpc error code
-func FromGRPC(err error) error {
- if err == nil {
- return nil
- }
-
- var cls error // divide these into error classes, becomes the cause
-
- switch code(err) {
- case codes.InvalidArgument:
- cls = ErrInvalidArgument
- case codes.AlreadyExists:
- cls = ErrAlreadyExists
- case codes.NotFound:
- cls = ErrNotFound
- case codes.Unavailable:
- cls = ErrUnavailable
- case codes.FailedPrecondition:
- cls = ErrFailedPrecondition
- case codes.Unimplemented:
- cls = ErrNotImplemented
- case codes.Canceled:
- cls = context.Canceled
- case codes.DeadlineExceeded:
- cls = context.DeadlineExceeded
- default:
- cls = ErrUnknown
- }
-
- msg := rebaseMessage(cls, err)
- if msg != "" {
- err = errors.Wrap(cls, msg)
- } else {
- err = errors.WithStack(cls)
- }
-
- return err
-}
-
-// rebaseMessage removes the repeats for an error at the end of an error
-// string. This will happen when taking an error over grpc then remapping it.
-//
-// Effectively, we just remove the string of cls from the end of err if it
-// appears there.
-func rebaseMessage(cls error, err error) string {
- desc := errDesc(err)
- clss := cls.Error()
- if desc == clss {
- return ""
- }
-
- return strings.TrimSuffix(desc, ": "+clss)
-}
-
-func isGRPCError(err error) bool {
- _, ok := status.FromError(err)
- return ok
-}
-
-func code(err error) codes.Code {
- if s, ok := status.FromError(err); ok {
- return s.Code()
- }
- return codes.Unknown
-}
-
-func errDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
deleted file mode 100644
index 37b6a7d1c..000000000
--- a/vendor/github.com/containerd/containerd/log/context.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package log
-
-import (
- "context"
-
- "github.com/sirupsen/logrus"
-)
-
-var (
- // G is an alias for GetLogger.
- //
- // We may want to define this locally to a package to get package tagged log
- // messages.
- G = GetLogger
-
- // L is an alias for the standard logger.
- L = logrus.NewEntry(logrus.StandardLogger())
-)
-
-type (
- loggerKey struct{}
-)
-
-const (
- // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
- // ensure the formatted time is always the same number of characters.
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-
- // TextFormat represents the text logging format
- TextFormat = "text"
-
- // JSONFormat represents the JSON logging format
- JSONFormat = "json"
-)
-
-// WithLogger returns a new context with the provided logger. Use in
-// combination with logger.WithField(s) for great effect.
-func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
- return context.WithValue(ctx, loggerKey{}, logger)
-}
-
-// GetLogger retrieves the current logger from the context. If no logger is
-// available, the default logger is returned.
-func GetLogger(ctx context.Context) *logrus.Entry {
- logger := ctx.Value(loggerKey{})
-
- if logger == nil {
- return L
- }
-
- return logger.(*logrus.Entry)
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go
deleted file mode 100644
index c7657e186..000000000
--- a/vendor/github.com/containerd/containerd/platforms/compare.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// MatchComparer is able to match and compare platforms to
-// filter and sort platforms.
-type MatchComparer interface {
- Matcher
-
- Less(specs.Platform, specs.Platform) bool
-}
-
-// platformVector returns an (ordered) vector of appropriate specs.Platform
-// objects to try matching for the given platform object (see platforms.Only).
-func platformVector(platform specs.Platform) []specs.Platform {
- vector := []specs.Platform{platform}
-
- switch platform.Architecture {
- case "amd64":
- vector = append(vector, specs.Platform{
- Architecture: "386",
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: platform.Variant,
- })
- case "arm":
- if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
- for armVersion--; armVersion >= 5; armVersion-- {
- vector = append(vector, specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v" + strconv.Itoa(armVersion),
- })
- }
- }
- case "arm64":
- variant := platform.Variant
- if variant == "" {
- variant = "v8"
- }
- vector = append(vector, platformVector(specs.Platform{
- Architecture: "arm",
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: variant,
- })...)
- }
-
- return vector
-}
-
-// Only returns a match comparer for a single platform
-// using default resolution logic for the platform.
-//
-// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
-// For arm/v7, will also match arm/v6 and arm/v5
-// For arm/v6, will also match arm/v5
-// For amd64, will also match 386
-func Only(platform specs.Platform) MatchComparer {
- return Ordered(platformVector(Normalize(platform))...)
-}
-
-// OnlyStrict returns a match comparer for a single platform.
-//
-// Unlike Only, OnlyStrict does not match sub platforms.
-// So, "arm/vN" will not match "arm/vM" where M < N,
-// and "amd64" will not also match "386".
-//
-// OnlyStrict matches non-canonical forms.
-// So, "arm64" matches "arm/64/v8".
-func OnlyStrict(platform specs.Platform) MatchComparer {
- return Ordered(Normalize(platform))
-}
-
-// Ordered returns a platform MatchComparer which matches any of the platforms
-// but orders them in order they are provided.
-func Ordered(platforms ...specs.Platform) MatchComparer {
- matchers := make([]Matcher, len(platforms))
- for i := range platforms {
- matchers[i] = NewMatcher(platforms[i])
- }
- return orderedPlatformComparer{
- matchers: matchers,
- }
-}
-
-// Any returns a platform MatchComparer which matches any of the platforms
-// with no preference for ordering.
-func Any(platforms ...specs.Platform) MatchComparer {
- matchers := make([]Matcher, len(platforms))
- for i := range platforms {
- matchers[i] = NewMatcher(platforms[i])
- }
- return anyPlatformComparer{
- matchers: matchers,
- }
-}
-
-// All is a platform MatchComparer which matches all platforms
-// with preference for ordering.
-var All MatchComparer = allPlatformComparer{}
-
-type orderedPlatformComparer struct {
- matchers []Matcher
-}
-
-func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
- for _, m := range c.matchers {
- if m.Match(platform) {
- return true
- }
- }
- return false
-}
-
-func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
- for _, m := range c.matchers {
- p1m := m.Match(p1)
- p2m := m.Match(p2)
- if p1m && !p2m {
- return true
- }
- if p1m || p2m {
- return false
- }
- }
- return false
-}
-
-type anyPlatformComparer struct {
- matchers []Matcher
-}
-
-func (c anyPlatformComparer) Match(platform specs.Platform) bool {
- for _, m := range c.matchers {
- if m.Match(platform) {
- return true
- }
- }
- return false
-}
-
-func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
- var p1m, p2m bool
- for _, m := range c.matchers {
- if !p1m && m.Match(p1) {
- p1m = true
- }
- if !p2m && m.Match(p2) {
- p2m = true
- }
- if p1m && p2m {
- return false
- }
- }
- // If one matches, and the other does, sort match first
- return p1m && !p2m
-}
-
-type allPlatformComparer struct{}
-
-func (allPlatformComparer) Match(specs.Platform) bool {
- return true
-}
-
-func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
- return false
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
deleted file mode 100644
index 4a7177e31..000000000
--- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "bufio"
- "os"
- "runtime"
- "strings"
- "sync"
-
- "github.com/containerd/containerd/errdefs"
- "github.com/containerd/containerd/log"
- "github.com/pkg/errors"
-)
-
-// Present the ARM instruction set architecture, eg: v7, v8
-// Don't use this value directly; call cpuVariant() instead.
-var cpuVariantValue string
-
-var cpuVariantOnce sync.Once
-
-func cpuVariant() string {
- cpuVariantOnce.Do(func() {
- if isArmArch(runtime.GOARCH) {
- cpuVariantValue = getCPUVariant()
- }
- })
- return cpuVariantValue
-}
-
-// For Linux, the kernel has already detected the ABI, ISA and Features.
-// So we don't need to access the ARM registers to detect platform information
-// by ourselves. We can just parse these information from /proc/cpuinfo
-func getCPUInfo(pattern string) (info string, err error) {
- if !isLinuxOS(runtime.GOOS) {
- return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
- }
-
- cpuinfo, err := os.Open("/proc/cpuinfo")
- if err != nil {
- return "", err
- }
- defer cpuinfo.Close()
-
- // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse
- // the first core is enough.
- scanner := bufio.NewScanner(cpuinfo)
- for scanner.Scan() {
- newline := scanner.Text()
- list := strings.Split(newline, ":")
-
- if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
- return strings.TrimSpace(list[1]), nil
- }
- }
-
- // Check whether the scanner encountered errors
- err = scanner.Err()
- if err != nil {
- return "", err
- }
-
- return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
-}
-
-func getCPUVariant() string {
- if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
- // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
- // runtime.GOARCH to determine the variants
- var variant string
- switch runtime.GOARCH {
- case "arm64":
- variant = "v8"
- case "arm":
- variant = "v7"
- default:
- variant = "unknown"
- }
-
- return variant
- }
-
- variant, err := getCPUInfo("Cpu architecture")
- if err != nil {
- log.L.WithError(err).Error("failure getting variant")
- return ""
- }
-
- // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
- // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
- if runtime.GOARCH == "arm" && variant == "7" {
- model, err := getCPUInfo("model name")
- if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
- variant = "6"
- }
- }
-
- switch strings.ToLower(variant) {
- case "8", "aarch64":
- variant = "v8"
- case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
- variant = "v7"
- case "6", "6tej":
- variant = "v6"
- case "5", "5t", "5te", "5tej":
- variant = "v5"
- case "4", "4t":
- variant = "v4"
- case "3":
- variant = "v3"
- default:
- variant = "unknown"
- }
-
- return variant
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go
deleted file mode 100644
index 6ede94061..000000000
--- a/vendor/github.com/containerd/containerd/platforms/database.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "runtime"
- "strings"
-)
-
-// isLinuxOS returns true if the operating system is Linux.
-//
-// The OS value should be normalized before calling this function.
-func isLinuxOS(os string) bool {
- return os == "linux"
-}
-
-// These function are generated from https://golang.org/src/go/build/syslist.go.
-//
-// We use switch statements because they are slightly faster than map lookups
-// and use a little less memory.
-
-// isKnownOS returns true if we know about the operating system.
-//
-// The OS value should be normalized before calling this function.
-func isKnownOS(os string) bool {
- switch os {
- case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
- return true
- }
- return false
-}
-
-// isArmArch returns true if the architecture is ARM.
-//
-// The arch value should be normalized before being passed to this function.
-func isArmArch(arch string) bool {
- switch arch {
- case "arm", "arm64":
- return true
- }
- return false
-}
-
-// isKnownArch returns true if we know about the architecture.
-//
-// The arch value should be normalized before being passed to this function.
-func isKnownArch(arch string) bool {
- switch arch {
- case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
- return true
- }
- return false
-}
-
-func normalizeOS(os string) string {
- if os == "" {
- return runtime.GOOS
- }
- os = strings.ToLower(os)
-
- switch os {
- case "macos":
- os = "darwin"
- }
- return os
-}
-
-// normalizeArch normalizes the architecture.
-func normalizeArch(arch, variant string) (string, string) {
- arch, variant = strings.ToLower(arch), strings.ToLower(variant)
- switch arch {
- case "i386":
- arch = "386"
- variant = ""
- case "x86_64", "x86-64":
- arch = "amd64"
- variant = ""
- case "aarch64", "arm64":
- arch = "arm64"
- switch variant {
- case "8", "v8":
- variant = ""
- }
- case "armhf":
- arch = "arm"
- variant = "v7"
- case "armel":
- arch = "arm"
- variant = "v6"
- case "arm":
- switch variant {
- case "", "7":
- variant = "v7"
- case "5", "6", "8":
- variant = "v" + variant
- }
- }
-
- return arch, variant
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
deleted file mode 100644
index cb77fbc9f..000000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "runtime"
-
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// DefaultString returns the default string specifier for the platform.
-func DefaultString() string {
- return Format(DefaultSpec())
-}
-
-// DefaultSpec returns the current platform's default platform specification.
-func DefaultSpec() specs.Platform {
- return specs.Platform{
- OS: runtime.GOOS,
- Architecture: runtime.GOARCH,
- // The Variant field will be empty if arch != ARM.
- Variant: cpuVariant(),
- }
-}
-
-// DefaultStrict returns strict form of Default.
-func DefaultStrict() MatchComparer {
- return OnlyStrict(DefaultSpec())
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
deleted file mode 100644
index e8a7d5ffa..000000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-// Default returns the default matcher for the platform.
-func Default() MatchComparer {
- return Only(DefaultSpec())
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
deleted file mode 100644
index 0c380e3b7..000000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// +build windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "fmt"
- "runtime"
- "strconv"
- "strings"
-
- imagespec "github.com/opencontainers/image-spec/specs-go/v1"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
- "golang.org/x/sys/windows"
-)
-
-type matchComparer struct {
- defaults Matcher
- osVersionPrefix string
-}
-
-// Match matches platform with the same windows major, minor
-// and build version.
-func (m matchComparer) Match(p imagespec.Platform) bool {
- if m.defaults.Match(p) {
- // TODO(windows): Figure out whether OSVersion is deprecated.
- return strings.HasPrefix(p.OSVersion, m.osVersionPrefix)
- }
- return false
-}
-
-// Less sorts matched platforms in front of other platforms.
-// For matched platforms, it puts platforms with larger revision
-// number in front.
-func (m matchComparer) Less(p1, p2 imagespec.Platform) bool {
- m1, m2 := m.Match(p1), m.Match(p2)
- if m1 && m2 {
- r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
- return r1 > r2
- }
- return m1 && !m2
-}
-
-func revision(v string) int {
- parts := strings.Split(v, ".")
- if len(parts) < 4 {
- return 0
- }
- r, err := strconv.Atoi(parts[3])
- if err != nil {
- return 0
- }
- return r
-}
-
-// Default returns the current platform's default platform specification.
-func Default() MatchComparer {
- major, minor, build := windows.RtlGetNtVersionNumbers()
- return matchComparer{
- defaults: Ordered(DefaultSpec(), specs.Platform{
- OS: "linux",
- Architecture: runtime.GOARCH,
- }),
- osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build),
- }
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
deleted file mode 100644
index 088bdea05..000000000
--- a/vendor/github.com/containerd/containerd/platforms/platforms.go
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-// Package platforms provides a toolkit for normalizing, matching and
-// specifying container platforms.
-//
-// Centered around OCI platform specifications, we define a string-based
-// specifier syntax that can be used for user input. With a specifier, users
-// only need to specify the parts of the platform that are relevant to their
-// context, providing an operating system or architecture or both.
-//
-// How do I use this package?
-//
-// The vast majority of use cases should simply use the match function with
-// user input. The first step is to parse a specifier into a matcher:
-//
-// m, err := Parse("linux")
-// if err != nil { ... }
-//
-// Once you have a matcher, use it to match against the platform declared by a
-// component, typically from an image or runtime. Since extracting an images
-// platform is a little more involved, we'll use an example against the
-// platform default:
-//
-// if ok := m.Match(Default()); !ok { /* doesn't match */ }
-//
-// This can be composed in loops for resolving runtimes or used as a filter for
-// fetch and select images.
-//
-// More details of the specifier syntax and platform spec follow.
-//
-// Declaring Platform Support
-//
-// Components that have strict platform requirements should use the OCI
-// platform specification to declare their support. Typically, this will be
-// images and runtimes that should make these declaring which platform they
-// support specifically. This looks roughly as follows:
-//
-// type Platform struct {
-// Architecture string
-// OS string
-// Variant string
-// }
-//
-// Most images and runtimes should at least set Architecture and OS, according
-// to their GOARCH and GOOS values, respectively (follow the OCI image
-// specification when in doubt). ARM should set variant under certain
-// discussions, which are outlined below.
-//
-// Platform Specifiers
-//
-// While the OCI platform specifications provide a tool for components to
-// specify structured information, user input typically doesn't need the full
-// context and much can be inferred. To solve this problem, we introduced
-// "specifiers". A specifier has the format
-// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
-// operating system or the architecture or both.
-//
-// An example of a common specifier is `linux/amd64`. If the host has a default
-// of runtime that matches this, the user can simply provide the component that
-// matters. For example, if a image provides amd64 and arm64 support, the
-// operating system, `linux` can be inferred, so they only have to provide
-// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
-// where the architecture may be known but a runtime may support images from
-// different operating systems.
-//
-// Normalization
-//
-// Because not all users are familiar with the way the Go runtime represents
-// platforms, several normalizations have been provided to make this package
-// easier to user.
-//
-// The following are performed for architectures:
-//
-// Value Normalized
-// aarch64 arm64
-// armhf arm
-// armel arm/v6
-// i386 386
-// x86_64 amd64
-// x86-64 amd64
-//
-// We also normalize the operating system `macos` to `darwin`.
-//
-// ARM Support
-//
-// To qualify ARM architecture, the Variant field is used to qualify the arm
-// version. The most common arm version, v7, is represented without the variant
-// unless it is explicitly provided. This is treated as equivalent to armhf. A
-// previous architecture, armel, will be normalized to arm/v6.
-//
-// While these normalizations are provided, their support on arm platforms has
-// not yet been fully implemented and tested.
-package platforms
-
-import (
- "regexp"
- "runtime"
- "strconv"
- "strings"
-
- "github.com/containerd/containerd/errdefs"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-var (
- specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
-)
-
-// Matcher matches platforms specifications, provided by an image or runtime.
-type Matcher interface {
- Match(platform specs.Platform) bool
-}
-
-// NewMatcher returns a simple matcher based on the provided platform
-// specification. The returned matcher only looks for equality based on os,
-// architecture and variant.
-//
-// One may implement their own matcher if this doesn't provide the required
-// functionality.
-//
-// Applications should opt to use `Match` over directly parsing specifiers.
-func NewMatcher(platform specs.Platform) Matcher {
- return &matcher{
- Platform: Normalize(platform),
- }
-}
-
-type matcher struct {
- specs.Platform
-}
-
-func (m *matcher) Match(platform specs.Platform) bool {
- normalized := Normalize(platform)
- return m.OS == normalized.OS &&
- m.Architecture == normalized.Architecture &&
- m.Variant == normalized.Variant
-}
-
-func (m *matcher) String() string {
- return Format(m.Platform)
-}
-
-// Parse parses the platform specifier syntax into a platform declaration.
-//
-// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
-// The minimum required information for a platform specifier is the operating
-// system or architecture. If there is only a single string (no slashes), the
-// value will be matched against the known set of operating systems, then fall
-// back to the known set of architectures. The missing component will be
-// inferred based on the local environment.
-func Parse(specifier string) (specs.Platform, error) {
- if strings.Contains(specifier, "*") {
- // TODO(stevvooe): need to work out exact wildcard handling
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
- }
-
- parts := strings.Split(specifier, "/")
-
- for _, part := range parts {
- if !specifierRe.MatchString(part) {
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
- }
- }
-
- var p specs.Platform
- switch len(parts) {
- case 1:
- // in this case, we will test that the value might be an OS, then look
- // it up. If it is not known, we'll treat it as an architecture. Since
- // we have very little information about the platform here, we are
- // going to be a little more strict if we don't know about the argument
- // value.
- p.OS = normalizeOS(parts[0])
- if isKnownOS(p.OS) {
- // picks a default architecture
- p.Architecture = runtime.GOARCH
- if p.Architecture == "arm" && cpuVariant() != "v7" {
- p.Variant = cpuVariant()
- }
-
- return p, nil
- }
-
- p.Architecture, p.Variant = normalizeArch(parts[0], "")
- if p.Architecture == "arm" && p.Variant == "v7" {
- p.Variant = ""
- }
- if isKnownArch(p.Architecture) {
- p.OS = runtime.GOOS
- return p, nil
- }
-
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
- case 2:
- // In this case, we treat as a regular os/arch pair. We don't care
- // about whether or not we know of the platform.
- p.OS = normalizeOS(parts[0])
- p.Architecture, p.Variant = normalizeArch(parts[1], "")
- if p.Architecture == "arm" && p.Variant == "v7" {
- p.Variant = ""
- }
-
- return p, nil
- case 3:
- // we have a fully specified variant, this is rare
- p.OS = normalizeOS(parts[0])
- p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
- if p.Architecture == "arm64" && p.Variant == "" {
- p.Variant = "v8"
- }
-
- return p, nil
- }
-
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
-}
-
-// MustParse is like Parses but panics if the specifier cannot be parsed.
-// Simplifies initialization of global variables.
-func MustParse(specifier string) specs.Platform {
- p, err := Parse(specifier)
- if err != nil {
- panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
- }
- return p
-}
-
-// Format returns a string specifier from the provided platform specification.
-func Format(platform specs.Platform) string {
- if platform.OS == "" {
- return "unknown"
- }
-
- return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
-}
-
-func joinNotEmpty(s ...string) string {
- var ss []string
- for _, s := range s {
- if s == "" {
- continue
- }
-
- ss = append(ss, s)
- }
-
- return strings.Join(ss, "/")
-}
-
-// Normalize validates and translate the platform to the canonical value.
-//
-// For example, if "Aarch64" is encountered, we change it to "arm64" or if
-// "x86_64" is encountered, it becomes "amd64".
-func Normalize(platform specs.Platform) specs.Platform {
- platform.OS = normalizeOS(platform.OS)
- platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
-
- // these fields are deprecated, remove them
- platform.OSFeatures = nil
- platform.OSVersion = ""
-
- return platform
-}
diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE
similarity index 100%
rename from vendor/github.com/containerd/containerd/LICENSE
rename to vendor/github.com/containerd/errdefs/LICENSE
diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md
new file mode 100644
index 000000000..bd418c63f
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/README.md
@@ -0,0 +1,13 @@
+# errdefs
+
+A Go package for defining and checking common containerd errors.
+
+## Project details
+
+**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go
new file mode 100644
index 000000000..f654d1964
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/errors.go
@@ -0,0 +1,443 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with fmt.Errorf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+package errdefs
+
+import (
+ "context"
+ "errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// These errors map closely to grpc errors.
+var (
+ ErrUnknown = errUnknown{}
+ ErrInvalidArgument = errInvalidArgument{}
+ ErrNotFound = errNotFound{}
+ ErrAlreadyExists = errAlreadyExists{}
+ ErrPermissionDenied = errPermissionDenied{}
+ ErrResourceExhausted = errResourceExhausted{}
+ ErrFailedPrecondition = errFailedPrecondition{}
+ ErrConflict = errConflict{}
+ ErrNotModified = errNotModified{}
+ ErrAborted = errAborted{}
+ ErrOutOfRange = errOutOfRange{}
+ ErrNotImplemented = errNotImplemented{}
+ ErrInternal = errInternal{}
+ ErrUnavailable = errUnavailable{}
+ ErrDataLoss = errDataLoss{}
+ ErrUnauthenticated = errUnauthorized{}
+)
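+
+// For example (a minimal sketch; the message text is illustrative), a caller
+// annotates an error with fmt.Errorf and "%w", then classifies it later:
+//
+//	err := fmt.Errorf("lookup of layer %q: %w", id, ErrNotFound)
+//	IsNotFound(err) // true: the class survives wrapping via %w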
+
+// cancelled maps to Moby's "ErrCancelled"
+type cancelled interface {
+ Cancelled()
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+ return errors.Is(err, context.Canceled) || isInterface[cancelled](err)
+}
+
+type errUnknown struct{}
+
+func (errUnknown) Error() string { return "unknown" }
+
+func (errUnknown) Unknown() {}
+
+func (e errUnknown) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unknown maps to Moby's "ErrUnknown"
+type unknown interface {
+ Unknown()
+}
+
+// IsUnknown returns true if the error is due to an unknown error,
+// unhandled condition or unexpected response.
+func IsUnknown(err error) bool {
+ return errors.Is(err, errUnknown{}) || isInterface[unknown](err)
+}
+
+type errInvalidArgument struct{}
+
+func (errInvalidArgument) Error() string { return "invalid argument" }
+
+func (errInvalidArgument) InvalidParameter() {}
+
+func (e errInvalidArgument) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// invalidParameter maps to Moby's "ErrInvalidParameter"
+type invalidParameter interface {
+ InvalidParameter()
+}
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+ return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err)
+}
+
+// deadlineExceed maps to Moby's "ErrDeadline"
+type deadlineExceeded interface {
+ DeadlineExceeded()
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+ return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err)
+}
+
+type errNotFound struct{}
+
+func (errNotFound) Error() string { return "not found" }
+
+func (errNotFound) NotFound() {}
+
+func (e errNotFound) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notFound maps to Moby's "ErrNotFound"
+type notFound interface {
+ NotFound()
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+ return errors.Is(err, ErrNotFound) || isInterface[notFound](err)
+}
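+
+// Foreign error types are matched through the marker interface as well; a
+// hypothetical type is enough (sketch):
+//
+//	type noSuchObject struct{}
+//
+//	func (noSuchObject) Error() string { return "no such object" }
+//	func (noSuchObject) NotFound()     {}
+//
+//	IsNotFound(noSuchObject{}) // true via the notFound interface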
+
+type errAlreadyExists struct{}
+
+func (errAlreadyExists) Error() string { return "already exists" }
+
+func (errAlreadyExists) AlreadyExists() {}
+
+func (e errAlreadyExists) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type alreadyExists interface {
+ AlreadyExists()
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+ return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err)
+}
+
+type errPermissionDenied struct{}
+
+func (errPermissionDenied) Error() string { return "permission denied" }
+
+func (errPermissionDenied) Forbidden() {}
+
+func (e errPermissionDenied) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// forbidden maps to Moby's "ErrForbidden"
+type forbidden interface {
+ Forbidden()
+}
+
+// IsPermissionDenied returns true if the error is due to permission denied
+// or forbidden (403) response
+func IsPermissionDenied(err error) bool {
+ return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err)
+}
+
+type errResourceExhausted struct{}
+
+func (errResourceExhausted) Error() string { return "resource exhausted" }
+
+func (errResourceExhausted) ResourceExhausted() {}
+
+func (e errResourceExhausted) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type resourceExhausted interface {
+ ResourceExhausted()
+}
+
+// IsResourceExhausted returns true if the error is due to
+// a lack of resources or too many attempts.
+func IsResourceExhausted(err error) bool {
+ return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err)
+}
+
+type errFailedPrecondition struct{}
+
+func (e errFailedPrecondition) Error() string { return "failed precondition" }
+
+func (errFailedPrecondition) FailedPrecondition() {}
+
+func (e errFailedPrecondition) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type failedPrecondition interface {
+ FailedPrecondition()
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+ return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err)
+}
+
+type errConflict struct{}
+
+func (errConflict) Error() string { return "conflict" }
+
+func (errConflict) Conflict() {}
+
+func (e errConflict) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// conflict maps to Moby's "ErrConflict"
+type conflict interface {
+ Conflict()
+}
+
+// IsConflict returns true if an operation could not proceed due to
+// a conflict.
+func IsConflict(err error) bool {
+ return errors.Is(err, errConflict{}) || isInterface[conflict](err)
+}
+
+type errNotModified struct{}
+
+func (errNotModified) Error() string { return "not modified" }
+
+func (errNotModified) NotModified() {}
+
+func (e errNotModified) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notModified maps to Moby's "ErrNotModified"
+type notModified interface {
+ NotModified()
+}
+
+// IsNotModified returns true if an operation could not proceed due
+// to an object not modified from a previous state.
+func IsNotModified(err error) bool {
+ return errors.Is(err, errNotModified{}) || isInterface[notModified](err)
+}
+
+type errAborted struct{}
+
+func (errAborted) Error() string { return "aborted" }
+
+func (errAborted) Aborted() {}
+
+func (e errAborted) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type aborted interface {
+ Aborted()
+}
+
+// IsAborted returns true if an operation was aborted.
+func IsAborted(err error) bool {
+ return errors.Is(err, errAborted{}) || isInterface[aborted](err)
+}
+
+type errOutOfRange struct{}
+
+func (errOutOfRange) Error() string { return "out of range" }
+
+func (errOutOfRange) OutOfRange() {}
+
+func (e errOutOfRange) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type outOfRange interface {
+ OutOfRange()
+}
+
+// IsOutOfRange returns true if an operation could not proceed due
+// to data being out of the expected range.
+func IsOutOfRange(err error) bool {
+ return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
+}
+
+type errNotImplemented struct{}
+
+func (errNotImplemented) Error() string { return "not implemented" }
+
+func (errNotImplemented) NotImplemented() {}
+
+func (e errNotImplemented) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notImplemented maps to Moby's "ErrNotImplemented"
+type notImplemented interface {
+ NotImplemented()
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+ return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
+}
+
+type errInternal struct{}
+
+func (errInternal) Error() string { return "internal" }
+
+func (errInternal) System() {}
+
+func (e errInternal) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// system maps to Moby's "ErrSystem"
+type system interface {
+ System()
+}
+
+// IsInternal returns true if the error is due to an internal or system error
+func IsInternal(err error) bool {
+ return errors.Is(err, errInternal{}) || isInterface[system](err)
+}
+
+type errUnavailable struct{}
+
+func (errUnavailable) Error() string { return "unavailable" }
+
+func (errUnavailable) Unavailable() {}
+
+func (e errUnavailable) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unavailable maps to Moby's "ErrUnavailable"
+type unavailable interface {
+ Unavailable()
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+ return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
+}
+
+type errDataLoss struct{}
+
+func (errDataLoss) Error() string { return "data loss" }
+
+func (errDataLoss) DataLoss() {}
+
+func (e errDataLoss) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// dataLoss maps to Moby's "ErrDataLoss"
+type dataLoss interface {
+ DataLoss()
+}
+
+// IsDataLoss returns true if data during an operation was lost or corrupted
+func IsDataLoss(err error) bool {
+ return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
+}
+
+type errUnauthorized struct{}
+
+func (errUnauthorized) Error() string { return "unauthorized" }
+
+func (errUnauthorized) Unauthorized() {}
+
+func (e errUnauthorized) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unauthorized maps to Moby's "ErrUnauthorized"
+type unauthorized interface {
+ Unauthorized()
+}
+
+// IsUnauthorized returns true if the error indicates that the user was
+// unauthenticated or unauthorized.
+func IsUnauthorized(err error) bool {
+ return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err)
+}
+
+func isInterface[T any](err error) bool {
+ for {
+ switch x := err.(type) {
+ case T:
+ return true
+ case customMessage:
+ err = x.err
+ case interface{ Unwrap() error }:
+ err = x.Unwrap()
+ if err == nil {
+ return false
+ }
+ case interface{ Unwrap() []error }:
+ for _, err := range x.Unwrap() {
+ if isInterface[T](err) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+ }
+}
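+
+// Note that the walker above also descends into joined errors, so a sketch
+// like
+//
+//	err := errors.Join(fmt.Errorf("ctx: %w", ErrNotFound), ErrConflict)
+//
+// satisfies both IsNotFound(err) and IsConflict(err).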
+
+// customMessage is used to provide a defined error with a custom message.
+// The message is not wrapped but can be compared by the `Is(error) bool` interface.
+type customMessage struct {
+ err error
+ msg string
+}
+
+func (c customMessage) Is(err error) bool {
+ return c.err == err
+}
+
+func (c customMessage) As(target any) bool {
+ return errors.As(c.err, target)
+}
+
+func (c customMessage) Error() string {
+ return c.msg
+}
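+
+// For example (sketch), WithMessage swaps the display text while keeping the
+// class comparable:
+//
+//	err := ErrNotFound.WithMessage("no such snapshot")
+//	err.Error()                 // "no such snapshot"
+//	errors.Is(err, ErrNotFound) // true, via customMessage.Is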
diff --git a/vendor/github.com/containerd/errdefs/go.mod b/vendor/github.com/containerd/errdefs/go.mod
new file mode 100644
index 000000000..85c8ccb85
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/go.mod
@@ -0,0 +1,3 @@
+module github.com/containerd/errdefs
+
+go 1.20
diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/containerd/errdefs/go.sum
similarity index 100%
rename from vendor/github.com/cespare/xxhash/v2/go.sum
rename to vendor/github.com/containerd/errdefs/go.sum
diff --git a/vendor/github.com/containerd/errdefs/pkg/LICENSE b/vendor/github.com/containerd/errdefs/pkg/LICENSE
new file mode 100644
index 000000000..584149b6e
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go b/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go
new file mode 100644
index 000000000..59577595a
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go
@@ -0,0 +1,353 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errgrpc provides utility functions for translating errors to
+// and from a gRPC context.
+//
+// The functions ToGRPC and ToNative can be used to map server-side and
+// client-side errors to the correct types.
+package errgrpc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ spb "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
+ "google.golang.org/protobuf/types/known/anypb"
+
+ "github.com/containerd/typeurl/v2"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/errdefs/pkg/internal/cause"
+ "github.com/containerd/errdefs/pkg/internal/types"
+)
+
+// ToGRPC will attempt to map the error into a grpc error, using the error
+// types defined in the errdefs package and attempting to preserve the
+// original description. Any type which does not resolve to a defined error
+// type will be assigned the unknown error code.
+//
+// Further information may be extracted from certain errors depending on their
+// type. The grpc error details will be used to attempt to preserve as much of
+// the error structures and types as possible.
+//
+// Errors which can be marshaled using protobuf or typeurl will be considered
+// for inclusion as GRPC error details.
+// Additionally, use the following interfaces in errors to preserve custom types:
+//
+// WrapError(error) error - Used to wrap the previous error
+// JoinErrors(...error) error - Used to join all previous errors
+// CollapseError() - Used for errors which carry information but
+// should not have their error message shown.
+func ToGRPC(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if _, ok := status.FromError(err); ok {
+ // error has already been mapped to grpc
+ return err
+ }
+ st := statusFromError(err)
+ if st != nil {
+ if details := errorDetails(err, false); len(details) > 0 {
+ if ds, _ := st.WithDetails(details...); ds != nil {
+ st = ds
+ }
+ }
+ err = st.Err()
+ }
+ return err
+}
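+
+// A minimal server-side sketch (the service, request and store names here are
+// illustrative, not part of this package):
+//
+//	func (s *service) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
+//		info, err := s.store.Info(ctx, req.Name)
+//		if err != nil {
+//			return nil, errgrpc.ToGRPC(fmt.Errorf("name %q: %w", req.Name, err))
+//		}
+//		return &InfoResponse{Info: info}, nil
+//	}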
+
+func statusFromError(err error) *status.Status {
+ switch errdefs.Resolve(err) {
+ case errdefs.ErrInvalidArgument:
+ return status.New(codes.InvalidArgument, err.Error())
+ case errdefs.ErrNotFound:
+ return status.New(codes.NotFound, err.Error())
+ case errdefs.ErrAlreadyExists:
+ return status.New(codes.AlreadyExists, err.Error())
+ case errdefs.ErrPermissionDenied:
+ return status.New(codes.PermissionDenied, err.Error())
+ case errdefs.ErrResourceExhausted:
+ return status.New(codes.ResourceExhausted, err.Error())
+ case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
+ return status.New(codes.FailedPrecondition, err.Error())
+ case errdefs.ErrAborted:
+ return status.New(codes.Aborted, err.Error())
+ case errdefs.ErrOutOfRange:
+ return status.New(codes.OutOfRange, err.Error())
+ case errdefs.ErrNotImplemented:
+ return status.New(codes.Unimplemented, err.Error())
+ case errdefs.ErrInternal:
+ return status.New(codes.Internal, err.Error())
+ case errdefs.ErrUnavailable:
+ return status.New(codes.Unavailable, err.Error())
+ case errdefs.ErrDataLoss:
+ return status.New(codes.DataLoss, err.Error())
+ case errdefs.ErrUnauthenticated:
+ return status.New(codes.Unauthenticated, err.Error())
+ case context.DeadlineExceeded:
+ return status.New(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return status.New(codes.Canceled, err.Error())
+ case errdefs.ErrUnknown:
+ return status.New(codes.Unknown, err.Error())
+ }
+ return nil
+}
+
+// errorDetails returns an array of errors which make up the provided error.
+// If firstIncluded is true, then all encodable errors will be used, otherwise
+// the first error in an error list will not be used, to account for the
+// base status error to which details are added via wrap or join.
+//
+// The errors are ordered in a way that they can be applied in order by either
+// wrapping or joining the errors to recreate an error with the same structure
+// when `WrapError` and `JoinErrors` interfaces are used.
+//
+// The intent is that when re-applying the errors to create a single error, the
+// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
+// are the same as for the original error.
+func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
+ switch uerr := err.(type) {
+ case interface{ Unwrap() error }:
+ details := errorDetails(uerr.Unwrap(), firstIncluded)
+
+ // If the type is able to wrap, then include if proto
+ if _, ok := err.(interface{ WrapError(error) error }); ok {
+ // Get proto message
+ if protoErr := toProtoMessage(err); protoErr != nil {
+ details = append(details, protoErr)
+ }
+ }
+
+ return details
+ case interface{ Unwrap() []error }:
+ var details []protoadapt.MessageV1
+ for i, e := range uerr.Unwrap() {
+ details = append(details, errorDetails(e, firstIncluded || i > 0)...)
+ }
+
+ if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
+ // Get proto message
+ if protoErr := toProtoMessage(err); protoErr != nil {
+ details = append(details, protoErr)
+ }
+ }
+ return details
+ }
+
+ if firstIncluded {
+ if protoErr := toProtoMessage(err); protoErr != nil {
+ return []protoadapt.MessageV1{protoErr}
+ }
+ if gs, ok := status.FromError(ToGRPC(err)); ok {
+ return []protoadapt.MessageV1{gs.Proto()}
+ }
+ // TODO: Else include unknown extra error type?
+ }
+
+ return nil
+}
+
+func toProtoMessage(err error) protoadapt.MessageV1 {
+ // Do not double encode proto messages, otherwise use Any
+ if pm, ok := err.(protoadapt.MessageV1); ok {
+ return pm
+ }
+ if pm, ok := err.(proto.Message); ok {
+ return protoadapt.MessageV1Of(pm)
+ }
+
+ if reflect.TypeOf(err).Kind() == reflect.Ptr {
+ a, aerr := typeurl.MarshalAny(err)
+ if aerr == nil {
+ return &anypb.Any{
+ TypeUrl: a.GetTypeUrl(),
+ Value: a.GetValue(),
+ }
+ }
+ }
+ return nil
+}
+
+// ToGRPCf maps the error to grpc error codes, assembling the formatting string
+// and combining it with the target error string.
+//
+// This is equivalent to grpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+ return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+}
+
+// ToNative returns the underlying error from a grpc service based on the grpc
+// error code. The grpc details are used to wrap the error in more context
+// or support multiple errors.
+func ToNative(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ s, isGRPC := status.FromError(err)
+
+ var (
+ desc string
+ code codes.Code
+ )
+
+ if isGRPC {
+ desc = s.Message()
+ code = s.Code()
+ } else {
+ desc = err.Error()
+ code = codes.Unknown
+ }
+
+ var cls error // divide these into error classes, becomes the cause
+
+ switch code {
+ case codes.InvalidArgument:
+ cls = errdefs.ErrInvalidArgument
+ case codes.AlreadyExists:
+ cls = errdefs.ErrAlreadyExists
+ case codes.NotFound:
+ cls = errdefs.ErrNotFound
+ case codes.Unavailable:
+ cls = errdefs.ErrUnavailable
+ case codes.FailedPrecondition:
+ // TODO: Has suffix is not sufficient for conflict and not modified
+ // Message should start with ": " or be at beginning of a line
+ // Message should end with ": " or be at the end of a line
+ // Compile a regex
+ if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) {
+ cls = errdefs.ErrConflict
+ } else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) {
+ cls = errdefs.ErrNotModified
+ } else {
+ cls = errdefs.ErrFailedPrecondition
+ }
+ case codes.Unimplemented:
+ cls = errdefs.ErrNotImplemented
+ case codes.Canceled:
+ cls = context.Canceled
+ case codes.DeadlineExceeded:
+ cls = context.DeadlineExceeded
+ case codes.Aborted:
+ cls = errdefs.ErrAborted
+ case codes.Unauthenticated:
+ cls = errdefs.ErrUnauthenticated
+ case codes.PermissionDenied:
+ cls = errdefs.ErrPermissionDenied
+ case codes.Internal:
+ cls = errdefs.ErrInternal
+ case codes.DataLoss:
+ cls = errdefs.ErrDataLoss
+ case codes.OutOfRange:
+ cls = errdefs.ErrOutOfRange
+ case codes.ResourceExhausted:
+ cls = errdefs.ErrResourceExhausted
+ default:
+ if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 {
+ if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 {
+ cls = cause.ErrUnexpectedStatus{Status: status}
+ }
+ }
+ if cls == nil {
+ cls = errdefs.ErrUnknown
+ }
+ }
+
+ msg := rebaseMessage(cls, desc)
+ if msg == "" {
+ err = cls
+ } else if msg != desc {
+ err = fmt.Errorf("%s: %w", msg, cls)
+ } else if wm, ok := cls.(interface{ WithMessage(string) error }); ok {
+ err = wm.WithMessage(msg)
+ } else {
+ err = fmt.Errorf("%s: %w", msg, cls)
+ }
+
+ if isGRPC {
+ errs := []error{err}
+ for _, a := range s.Details() {
+ var derr error
+
+ // First decode error if needed
+ if s, ok := a.(*spb.Status); ok {
+ derr = ToNative(status.ErrorProto(s))
+ } else if e, ok := a.(error); ok {
+ derr = e
+ } else if dany, ok := a.(typeurl.Any); ok {
+ i, uerr := typeurl.UnmarshalAny(dany)
+ if uerr == nil {
+ if e, ok = i.(error); ok {
+ derr = e
+ } else {
+ derr = fmt.Errorf("non-error unmarshalled detail: %v", i)
+ }
+ } else {
+ derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr)
+ }
+ } else {
+ derr = fmt.Errorf("non-error detail: %v", a)
+ }
+
+ switch werr := derr.(type) {
+ case interface{ WrapError(error) error }:
+ errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1])
+ case interface{ JoinErrors(...error) error }:
+ // TODO: Consider whether this should support joining a subset
+ errs[0] = werr.JoinErrors(errs...)
+ case interface{ CollapseError() }:
+ errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr)
+ default:
+ errs = append(errs, derr)
+ }
+
+ }
+ if len(errs) > 1 {
+ err = errors.Join(errs...)
+ } else {
+ err = errs[0]
+ }
+ }
+
+ return err
+}
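+
+// Client-side sketch: map the status error back to an errdefs class so the
+// usual checks apply (the client and request names are illustrative):
+//
+//	_, err := client.Info(ctx, req)
+//	if err := errgrpc.ToNative(err); errdefs.IsNotFound(err) {
+//		// the object is missing on the server; create or pull instead
+//	}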
+
+// rebaseMessage removes the repeats for an error at the end of an error
+// string. This will happen when taking an error over grpc then remapping it.
+//
+// Effectively, we just remove the string of cls from the end of err if it
+// appears there.
+func rebaseMessage(cls error, desc string) string {
+ clss := cls.Error()
+ if desc == clss {
+ return ""
+ }
+
+ return strings.TrimSuffix(desc, ": "+clss)
+}
diff --git a/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go
new file mode 100644
index 000000000..d88756bb0
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go
@@ -0,0 +1,33 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package cause is used to define root causes for errors
+// common to error packages like grpc and http.
+package cause
+
+import "fmt"
+
+type ErrUnexpectedStatus struct {
+ Status int
+}
+
+const UnexpectedStatusPrefix = "unexpected status "
+
+func (e ErrUnexpectedStatus) Error() string {
+ return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
+}
+
+func (ErrUnexpectedStatus) Unknown() {}
diff --git a/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go b/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go
new file mode 100644
index 000000000..a37e7722a
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go
@@ -0,0 +1,57 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package types
+
+import "fmt"
+
+// CollapsibleError indicates the error should be collapsed
+type CollapsibleError interface {
+ CollapseError()
+}
+
+// CollapsedError returns a new error with the collapsed
+// errors returned on Unwrap or when formatted with "%+v"
+func CollapsedError(err error, collapsed ...error) error {
+ return collapsedError{err, collapsed}
+}
+
+type collapsedError struct {
+ error
+ collapsed []error
+}
+
+func (c collapsedError) Unwrap() []error {
+ return append([]error{c.error}, c.collapsed...)
+}
+
+func (c collapsedError) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", c.error)
+ for _, err := range c.collapsed {
+ fmt.Fprintf(s, "\n%+v", err)
+ }
+ return
+ }
+ fallthrough
+ case 's':
+ fmt.Fprint(s, c.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", c.Error())
+ }
+}
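+
+// Sketch: a collapsed detail is hidden from Error() but surfaces with "%+v":
+//
+//	err := CollapsedError(errors.New("base"), errors.New("detail"))
+//	err.Error()             // "base"
+//	fmt.Sprintf("%+v", err) // "base\ndetail"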
diff --git a/vendor/github.com/containerd/errdefs/resolve.go b/vendor/github.com/containerd/errdefs/resolve.go
new file mode 100644
index 000000000..c02d4a73f
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/resolve.go
@@ -0,0 +1,147 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errdefs
+
+import "context"
+
+// Resolve returns the first error found in the error chain which matches an
+// error defined in this package or context error. A raw, unwrapped error is
+// returned or ErrUnknown if no matching error is found.
+//
+// This is useful for determining a response code based on the outermost wrapped
+// error rather than the original cause. For example, a not found error deep
+// in the code may be wrapped as an invalid argument. When determining status
+// code from Is* functions, the depth or ordering of the error is not
+// considered.
+//
+// The search is depth first: a wrapped error returned from any part of
+// the chain via `Unwrap() error` will be returned before any joined errors
+// returned by `Unwrap() []error`.
+func Resolve(err error) error {
+ if err == nil {
+ return nil
+ }
+ err = firstError(err)
+ if err == nil {
+ err = ErrUnknown
+ }
+ return err
+}
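+
+// For example (sketch), a not found error rewrapped as an invalid argument
+// resolves to the outermost class, even though both Is* checks hold:
+//
+//	err := fmt.Errorf("%w: lookup failed: %w", ErrInvalidArgument, ErrNotFound)
+//	Resolve(err) // ErrInvalidArgument, the first match depth first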
+
+func firstError(err error) error {
+ for {
+ switch err {
+ case ErrUnknown,
+ ErrInvalidArgument,
+ ErrNotFound,
+ ErrAlreadyExists,
+ ErrPermissionDenied,
+ ErrResourceExhausted,
+ ErrFailedPrecondition,
+ ErrConflict,
+ ErrNotModified,
+ ErrAborted,
+ ErrOutOfRange,
+ ErrNotImplemented,
+ ErrInternal,
+ ErrUnavailable,
+ ErrDataLoss,
+ ErrUnauthenticated,
+ context.DeadlineExceeded,
+ context.Canceled:
+ return err
+ }
+ switch e := err.(type) {
+ case customMessage:
+ err = e.err
+ case unknown:
+ return ErrUnknown
+ case invalidParameter:
+ return ErrInvalidArgument
+ case notFound:
+ return ErrNotFound
+ case alreadyExists:
+ return ErrAlreadyExists
+ case forbidden:
+ return ErrPermissionDenied
+ case resourceExhausted:
+ return ErrResourceExhausted
+ case failedPrecondition:
+ return ErrFailedPrecondition
+ case conflict:
+ return ErrConflict
+ case notModified:
+ return ErrNotModified
+ case aborted:
+ return ErrAborted
+ case errOutOfRange:
+ return ErrOutOfRange
+ case notImplemented:
+ return ErrNotImplemented
+ case system:
+ return ErrInternal
+ case unavailable:
+ return ErrUnavailable
+ case dataLoss:
+ return ErrDataLoss
+ case unauthorized:
+ return ErrUnauthenticated
+ case deadlineExceeded:
+ return context.DeadlineExceeded
+ case cancelled:
+ return context.Canceled
+ case interface{ Unwrap() error }:
+ err = e.Unwrap()
+ if err == nil {
+ return nil
+ }
+ case interface{ Unwrap() []error }:
+ for _, ue := range e.Unwrap() {
+ if fe := firstError(ue); fe != nil {
+ return fe
+ }
+ }
+ return nil
+ case interface{ Is(error) bool }:
+ for _, target := range []error{ErrUnknown,
+ ErrInvalidArgument,
+ ErrNotFound,
+ ErrAlreadyExists,
+ ErrPermissionDenied,
+ ErrResourceExhausted,
+ ErrFailedPrecondition,
+ ErrConflict,
+ ErrNotModified,
+ ErrAborted,
+ ErrOutOfRange,
+ ErrNotImplemented,
+ ErrInternal,
+ ErrUnavailable,
+ ErrDataLoss,
+ ErrUnauthenticated,
+ context.DeadlineExceeded,
+ context.Canceled} {
+ if e.Is(target) {
+ return target
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+ }
+}
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
similarity index 100%
rename from vendor/google.golang.org/genproto/LICENSE
rename to vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 000000000..6aba0ef1f
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,689 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+)
+
+type options struct {
+ chunkSize int
+ compressionLevel int
+ prioritizedFiles []string
+ missedPrioritizedFiles *[]string
+ compression Compression
+ ctx context.Context
+ minChunkSize int
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+ return func(o *options) error {
+ o.chunkSize = chunkSize
+ return nil
+ }
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+ return func(o *options) error {
+ o.compressionLevel = level
+ return nil
+ }
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/"
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, this records all missed file names to the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+ return func(o *options) error {
+ o.compression = compression
+ return nil
+ }
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data that
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files,
+// which hopefully leads to a smaller result blob.
+// NOTE: This adds a TOC property that old readers don't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+ return func(o *options) error {
+ o.minChunkSize = minChunkSize
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds a blob in parallel, dividing that blob into several
+// (at least runtime.GOMAXPROCS(0)) sub-blobs.
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+ var opts options
+ opts.compressionLevel = gzip.BestCompression // BestCompression by default
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+ if opts.compression == nil {
+ opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+ }
+ layerFiles := newTempFiles()
+ ctx := opts.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-done:
+ // nop
+ case <-ctx.Done():
+ layerFiles.CleanupAll()
+ }
+ }()
+ defer func() {
+ if rErr != nil {
+ if err := layerFiles.CleanupAll(); err != nil {
+ rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+ }
+ }
+ if cErr := ctx.Err(); cErr != nil {
+ rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+ }
+ }()
+ tarBlob, err := decompressBlob(tarBlob, layerFiles)
+ if err != nil {
+ return nil, err
+ }
+ entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+ if err != nil {
+ return nil, err
+ }
+ var tarParts [][]*entry
+ if opts.minChunkSize > 0 {
+ // Each entry needs to know the size of the current gzip stream so they
+ // cannot be processed in parallel.
+ tarParts = [][]*entry{entries}
+ } else {
+ tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+ }
+ writers := make([]*Writer, len(tarParts))
+ payloads := make([]*os.File, len(tarParts))
+ var mu sync.Mutex
+ var eg errgroup.Group
+ for i, parts := range tarParts {
+ i, parts := i, parts
+ // builds verifiable stargz sub-blobs
+ eg.Go(func() error {
+ esgzFile, err := layerFiles.TempFile("", "esgzdata")
+ if err != nil {
+ return err
+ }
+ sw := NewWriterWithCompressor(esgzFile, opts.compression)
+ sw.ChunkSize = opts.chunkSize
+ sw.MinChunkSize = opts.minChunkSize
+ if sw.needsOpenGzEntries == nil {
+ sw.needsOpenGzEntries = make(map[string]struct{})
+ }
+ for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+ sw.needsOpenGzEntries[f] = struct{}{}
+ }
+ if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+ return err
+ }
+ mu.Lock()
+ writers[i] = sw
+ payloads[i] = esgzFile
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ rErr = err
+ return nil, err
+ }
+ tocAndFooter, tocDgst, err := closeWithCombine(writers...)
+ if err != nil {
+ rErr = err
+ return nil, err
+ }
+ var rs []io.Reader
+ for _, p := range payloads {
+ fs, err := fileSectionReader(p)
+ if err != nil {
+ return nil, err
+ }
+ rs = append(rs, fs)
+ }
+ diffID := digest.Canonical.Digester()
+ pr, pw := io.Pipe()
+ go func() {
+ r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer r.Close()
+ if _, err := io.Copy(diffID.Hash(), r); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return &Blob{
+ ReadCloser: readCloser{
+ Reader: pr,
+ closeFunc: layerFiles.CleanupAll,
+ },
+ tocDigest: tocDgst,
+ diffID: diffID,
+ }, nil
+}
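+
+// A minimal caller sketch; the file name, prioritized list and destination
+// writer dst are illustrative:
+//
+//	f, _ := os.Open("layer.tar")
+//	defer f.Close()
+//	fi, _ := f.Stat()
+//	sr := io.NewSectionReader(f, 0, fi.Size())
+//	blob, err := Build(sr, WithPrioritizedFiles([]string{"bin/app"}))
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(dst, blob); err != nil {
+//		return err
+//	}
+//	blob.Close()
+//	diffID, tocDgst := blob.DiffID(), blob.TOCDigest() // DiffID is valid only after Close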
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that all Writers were combined into.
+// The Writers don't write the TOC and footer to the underlying writers, so they can be
+// combined into a single eStargz, and the tocAndFooter returned by this function can
+// be appended at the tail of that combined blob.
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(JTOC)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+ buf := new(bytes.Buffer)
+ tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, tocDigest, nil
+}
+
+// divideEntries divides passed entries to the parts at least the number specified by the
+// argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts from these
+// files, keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to sort: %w", err)
+ }
+
+ // Sort the tar file, respecting the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+ // Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ defer tw.Close()
+ for _, entry := range entries {
+ if err := tw.WriteHeader(entry.header); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ return
+ }
+ if _, err := io.Copy(tw, entry.payload); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ return
+ }
+ }
+ pw.Close()
+ }()
+ return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+ tf := &tarFile{}
+ pw, err := newCountReadSeeker(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to make position watcher: %w", err)
+ }
+ tr := tar.NewReader(pw)
+
+ // Walk through all nodes.
+ for {
+ // Fetch and parse next header.
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("failed to parse tar file, %w", err)
+ }
+ switch cleanEntryName(h.Name) {
+ case PrefetchLandmark, NoPrefetchLandmark:
+ // Ignore existing landmark
+ continue
+ }
+
+ // Add entry. If it already exists, replace it.
+ if _, ok := tf.get(h.Name); ok {
+ tf.remove(h.Name)
+ }
+ tf.add(&entry{
+ header: h,
+ payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+ })
+ }
+
+ return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+ name = cleanEntryName(name)
+ if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok {
+ // entry of the root directory exists. we should move it as well.
+ // this case will occur if tar entries are prefixed with "./", "/", etc.
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+ }
+
+ _, okIn := in.get(name)
+ _, okOut := out.get(name)
+ if !okIn && !okOut {
+ return fmt.Errorf("file: %q: %w", name, errNotFound)
+ }
+
+ parent, _ := path.Split(strings.TrimSuffix(name, "/"))
+ if err := moveRec(parent, in, out); err != nil {
+ return err
+ }
+ if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
+ if err := moveRec(e.header.Linkname, in, out); err != nil {
+ return err
+ }
+ }
+ if e, ok := in.get(name); ok {
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+}
+
+type entry struct {
+ header *tar.Header
+ payload io.ReadSeeker
+}
+
+type tarFile struct {
+ index map[string]*entry
+ stream []*entry
+}
+
+func (f *tarFile) add(e *entry) {
+ if f.index == nil {
+ f.index = make(map[string]*entry)
+ }
+ f.index[cleanEntryName(e.header.Name)] = e
+ f.stream = append(f.stream, e)
+}
+
+func (f *tarFile) remove(name string) {
+ name = cleanEntryName(name)
+ if f.index != nil {
+ delete(f.index, name)
+ }
+ var filtered []*entry
+ for _, e := range f.stream {
+ if cleanEntryName(e.header.Name) == name {
+ continue
+ }
+ filtered = append(filtered, e)
+ }
+ f.stream = filtered
+}
+
+func (f *tarFile) get(name string) (e *entry, ok bool) {
+ if f.index == nil {
+ return nil, false
+ }
+ e, ok = f.index[cleanEntryName(name)]
+ return
+}
+
+func (f *tarFile) dump() []*entry {
+ return f.stream
+}
+
+type readCloser struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (rc readCloser) Close() error {
+ return rc.closeFunc()
+}
+
+func fileSectionReader(file *os.File) (*io.SectionReader, error) {
+ info, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(file, 0, info.Size()), nil
+}
+
+func newTempFiles() *tempFiles {
+ return &tempFiles{}
+}
+
+type tempFiles struct {
+ files []*os.File
+ filesMu sync.Mutex
+ cleanupOnce sync.Once
+}
+
+func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
+ f, err := os.CreateTemp(dir, pattern)
+ if err != nil {
+ return nil, err
+ }
+ tf.filesMu.Lock()
+ tf.files = append(tf.files, f)
+ tf.filesMu.Unlock()
+ return f, nil
+}
+
+func (tf *tempFiles) CleanupAll() (err error) {
+ tf.cleanupOnce.Do(func() {
+ err = tf.cleanupAll()
+ })
+ return
+}
+
+func (tf *tempFiles) cleanupAll() error {
+ tf.filesMu.Lock()
+ defer tf.filesMu.Unlock()
+ var allErr []error
+ for _, f := range tf.files {
+ if err := f.Close(); err != nil {
+ allErr = append(allErr, err)
+ }
+ if err := os.Remove(f.Name()); err != nil {
+ allErr = append(allErr, err)
+ }
+ }
+ tf.files = nil
+ return errorutil.Aggregate(allErr)
+}
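+
+// Usage sketch: callers are expected to pair TempFile with a deferred
+// CleanupAll so scratch files are always closed and removed:
+//
+//	tmp := newTempFiles()
+//	defer tmp.CleanupAll()
+//	f, err := tmp.TempFile("", "scratch")
+//	if err != nil {
+//		return err
+//	}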
+
+func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
+ pos := int64(0)
+ return &countReadSeeker{r: r, cPos: &pos}, nil
+}
+
+type countReadSeeker struct {
+ r io.ReaderAt
+ cPos *int64
+
+ mu sync.Mutex
+}
+
+func (cr *countReadSeeker) Read(p []byte) (int, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ n, err := cr.r.ReadAt(p, *cr.cPos)
+ if err == nil {
+ *cr.cPos += int64(n)
+ }
+ return n, err
+}
+
+func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ switch whence {
+ default:
+ return 0, fmt.Errorf("Unknown whence: %v", whence)
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += *cr.cPos
+ case io.SeekEnd:
+ return 0, fmt.Errorf("Unsupported whence: %v", whence)
+ }
+
+ if offset < 0 {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ *cr.cPos = offset
+ return offset, nil
+}
+
+func (cr *countReadSeeker) currentPos() int64 {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ return *cr.cPos
+}
+
+func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+ if org.Size() < 4 {
+ return org, nil
+ }
+ src := make([]byte, 4)
+ if _, err := org.Read(src); err != nil && err != io.EOF {
+ return nil, err
+ }
+ var dR io.Reader
+ if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
+ // gzip
+ dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dgR.Close()
+ dR = io.Reader(dgR)
+ } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
+ // zstd
+ dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dzR.Close()
+ dR = io.Reader(dzR)
+ } else {
+ // uncompressed
+ return io.NewSectionReader(org, 0, org.Size()), nil
+ }
+ b, err := tmp.TempFile("", "uncompresseddata")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(b, dR); err != nil {
+ return nil, err
+ }
+ return fileSectionReader(b)
+}
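+
+// Note on the sniffing above: gzip members begin with the bytes
+// 0x1F 0x8B 0x08 (ID1, ID2, CM=deflate) and zstd frames with the
+// little-endian magic 0xFD2FB528, i.e. 0x28 0xB5 0x2F 0xFD on the wire;
+// anything else is treated as an uncompressed tar.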
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
new file mode 100644
index 000000000..6de78b02d
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errorutil
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Aggregate combines a list of errors into a single new error.
+func Aggregate(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ points := make([]string, len(errs)+1)
+ points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+ for i, err := range errs {
+ points[i+1] = fmt.Sprintf("* %s", err)
+ }
+ return errors.New(strings.Join(points, "\n\t"))
+ }
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 000000000..f4d554655
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1223 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+ sr *io.SectionReader
+ toc *JTOC
+ tocDigest digest.Digest
+
+ // m stores all non-chunk entries, keyed by name.
+ m map[string]*TOCEntry
+
+ // chunks stores all TOCEntry values for regular files that
+ // are split up. For a file with a single chunk, it's only
+ // stored in m.
+ chunks map[string][]*TOCEntry
+
+ decompressor Decompressor
+}
+
+type openOpts struct {
+ tocOffset int64
+ decompressors []Decompressor
+ telemetry *Telemetry
+}
+
+// OpenOption is an option used when opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+ return func(o *openOpts) error {
+ o.tocOffset = tocOffset
+ return nil
+ }
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+ return func(o *openOpts) error {
+ o.decompressors = decompressors
+ return nil
+ }
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+ return func(o *openOpts) error {
+ o.telemetry = telemetry
+ return nil
+ }
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the time elapsed since then
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// the latency metrics of the respective steps of the estargz open operation. To be used with the WithTelemetry option.
+type Telemetry struct {
+ GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+ GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+ DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
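+
+// Example (sketch): each hook receives the operation's start time, so the
+// caller can record the elapsed time with any metrics backend; recordMs
+// below is a hypothetical helper.
+//
+//	tel := &Telemetry{
+//		GetFooterLatency: func(start time.Time) { recordMs("footer", time.Since(start)) },
+//	}
+//	r, err := Open(sr, WithTelemetry(tel))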
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+ var opts openOpts
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+ decompressors := append(gzipCompressors, opts.decompressors...)
+
+ // Determine the size to fetch. Try to fetch as many bytes as possible.
+ fetchSize := maxFooterSize(sr.Size(), decompressors...)
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+ if maybeTocOffset > sr.Size() {
+ return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+ }
+ fetchSize = sr.Size() - maybeTocOffset
+ }
+
+ start := time.Now() // before getting layer footer
+ footer := make([]byte, fetchSize)
+ if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+ return nil, fmt.Errorf("error reading footer: %v", err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+ opts.telemetry.GetFooterLatency(start)
+ }
+
+ var allErr []error
+ var found bool
+ var r *Reader
+ for _, d := range decompressors {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ maybeTocBytes := footer[:fOffset]
+ _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+ if err != nil {
+ allErr = append(allErr, err)
+ continue
+ }
+ if tocOffset >= 0 && tocSize <= 0 {
+ tocSize = sr.Size() - tocOffset - fSize
+ }
+ if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
+ maybeTocBytes = maybeTocBytes[:tocSize]
+ }
+ r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+ if err == nil {
+ found = true
+ break
+ }
+ allErr = append(allErr, err)
+ }
+ if !found {
+ return nil, errorutil.Aggregate(allErr)
+ }
+ if err := r.initFields(); err != nil {
+ return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+ }
+ return r, nil
+}
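+
+// Example (sketch): open a layer and read one regular file; blob is assumed
+// to be an io.ReaderAt over the eStargz blob with a known size.
+//
+//	sr := io.NewSectionReader(blob, 0, blobSize)
+//	r, err := Open(sr)
+//	if err != nil { ... }
+//	if e, ok := r.Lookup("etc/passwd"); ok && e.Type == "reg" {
+//		f, err := r.OpenFile(e.Name)
+//		...
+//		content, err := io.ReadAll(f)
+//	}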
+
+// OpenFooter extracts and parses footer from the given blob.
+// This only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+ if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+ return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+ }
+ var footer [FooterSize]byte
+ if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+ return 0, 0, fmt.Errorf("error reading footer: %v", err)
+ }
+ var allErr []error
+ for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ _, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+ if err == nil {
+ return tocOffset, fSize, err
+ }
+ allErr = append(allErr, err)
+ }
+ return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// Unexported fields are populated and TOCEntry fields that were
+// implicit in the JSON are populated.
+func (r *Reader) initFields() error {
+ r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+ r.chunks = make(map[string][]*TOCEntry)
+ var lastPath string
+ uname := map[int]string{}
+ gname := map[int]string{}
+ var lastRegEnt *TOCEntry
+ var chunkTopIndex int
+ for i, ent := range r.toc.Entries {
+ ent.Name = cleanEntryName(ent.Name)
+ switch ent.Type {
+ case "reg", "chunk":
+ if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+ chunkTopIndex = i
+ }
+ ent.chunkTopIndex = chunkTopIndex
+ }
+ if ent.Type == "reg" {
+ lastRegEnt = ent
+ }
+ if ent.Type == "chunk" {
+ ent.Name = lastPath
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ if ent.ChunkSize == 0 && lastRegEnt != nil {
+ ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+ }
+ } else {
+ lastPath = ent.Name
+
+ if ent.Uname != "" {
+ uname[ent.UID] = ent.Uname
+ } else {
+ ent.Uname = uname[ent.UID]
+ }
+ if ent.Gname != "" {
+ gname[ent.GID] = ent.Gname
+ } else {
+ ent.Gname = gname[ent.GID]
+ }
+
+ ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+ if ent.Type == "dir" {
+ ent.NumLink++ // Parent dir links to this directory
+ }
+ r.m[ent.Name] = ent
+ }
+ if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+ r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ }
+ if ent.ChunkSize == 0 && ent.Size != 0 {
+ ent.ChunkSize = ent.Size
+ }
+ }
+
+ // Populate children, add implicit directories:
+ for _, ent := range r.toc.Entries {
+ if ent.Type == "chunk" {
+ continue
+ }
+ // add "foo/":
+ // add "foo" child to "" (creating "" if necessary)
+ //
+ // add "foo/bar/":
+ // add "bar" child to "foo" (creating "foo" if necessary)
+ //
+ // add "foo/bar.txt":
+ // add "bar.txt" child to "foo" (creating "foo" if necessary)
+ //
+ // add "a/b/c/d/e/f.txt":
+ // create "a/b/c/d/e" node
+ // add "f.txt" child to "e"
+
+ name := ent.Name
+ pdirName := parentDir(name)
+ if name == pdirName {
+ // This entry and its parent are the same.
+ // Ignore it to avoid an infinite reference loop. This can
+ // occur, for example, when the tar contains the root
+ // directory itself (e.g. "./", "/").
+ continue
+ }
+ pdir := r.getOrCreateDir(pdirName)
+ ent.NumLink++ // at least one name (ent.Name) references this entry.
+ if ent.Type == "hardlink" {
+ org, err := r.getSource(ent)
+ if err != nil {
+ return err
+ }
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 && e.InnerOffset == 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself (".") and the parent link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+// TOCDigest returns the digest of the TOC JSON of this blob.
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+ // Verify the digest of TOC JSON
+ if r.tocDigest != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+ }
+ return r.Verifiers()
+}
+
+// Verifiers returns a TOCEntryVerifier for this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC itself.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+ chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+ regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
+ var chunkDigestMapIncomplete bool
+ var regDigestMapIncomplete bool
+ var containsChunk bool
+ for _, e := range r.toc.Entries {
+ if e.Type != "reg" && e.Type != "chunk" {
+ continue
+ }
+
+ // offset must be unique in stargz blob
+ _, dOK := chunkDigestMap[e.Offset]
+ _, rOK := regDigestMap[e.Offset]
+ if dOK || rOK {
+ return nil, fmt.Errorf("offset %d found twice", e.Offset)
+ }
+
+ if e.Type == "reg" {
+ if e.Size == 0 {
+ continue // ignore empty files
+ }
+
+ // record the digest of regular file payload
+ if e.Digest != "" {
+ d, err := digest.Parse(e.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+ }
+ regDigestMap[e.Offset] = d
+ } else {
+ regDigestMapIncomplete = true
+ }
+ } else {
+ containsChunk = true // this layer contains "chunk" entries.
+ }
+
+ // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ if e.ChunkDigest != "" {
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+ }
+ chunkDigestMap[e.Offset] = d
+ } else {
+ chunkDigestMapIncomplete = true
+ }
+ }
+
+ if chunkDigestMapIncomplete {
+ // Though some chunk digests are not found, if this layer doesn't contain
+ // "chunk"s and all digest of "reg" files are recorded, we can use them instead.
+ if !containsChunk && !regDigestMapIncomplete {
+ return &verifier{digestMap: regDigestMap}, nil
+ }
+ return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+ }
+
+ return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+ digestMap map[int64]digest.Digest
+ digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+ v.digestMapMu.Lock()
+ defer v.digestMapMu.Unlock()
+ d, ok := v.digestMap[ce.Offset]
+ if !ok {
+ return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+ ce.Offset, ce.ChunkSize)
+ }
+ return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to the root.
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
+ name = cleanEntryName(name)
+ e, ok = r.Lookup(name)
+ if !ok || !e.isDataType() {
+ return nil, false
+ }
+ ents := r.chunks[name]
+ if len(ents) < 2 {
+ if offset >= e.ChunkSize {
+ return nil, false
+ }
+ return e, true
+ }
+ i := sort.Search(len(ents), func(i int) bool {
+ e := ents[i]
+ return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
+ })
+ if i == len(ents) {
+ return nil, false
+ }
+ return ents[i], true
+}
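+
+// Example (sketch): verify a chunk read against the TOC digests; tocDigest is
+// assumed to come from a trusted source such as an image manifest annotation.
+//
+//	v, err := r.VerifyTOC(tocDigest)
+//	if err != nil { ... }
+//	if ce, ok := r.ChunkEntryForOffset("usr/bin/app", 0); ok {
+//		w, _ := v.Verifier(ce)
+//		f, _ := r.OpenFile(ce.Name)
+//		io.Copy(w, io.NewSectionReader(f, ce.ChunkOffset, ce.ChunkSize))
+//		verified := w.Verified() // false means the chunk is corrupted
+//	}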
+
+// Lookup returns the Table of Contents entry for the given path.
+//
+// To get the root directory, use the empty string.
+// Path must be an absolute path or one that is relative to the root.
+func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
+ path = cleanEntryName(path)
+ if r == nil {
+ return
+ }
+ e, ok = r.m[path]
+ if ok && e.Type == "hardlink" {
+ var err error
+ e, err = r.getSource(e)
+ if err != nil {
+ return nil, false
+ }
+ }
+ return
+}
+
+// OpenFile returns the reader of the specified file payload.
+//
+// Name must be an absolute path or one that is relative to the root.
+func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) newFileReader(name string) (*fileReader, error) {
+ name = cleanEntryName(name)
+ ent, ok := r.Lookup(name)
+ if !ok {
+ // TODO: come up with some error plan. This is lazy:
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: os.ErrNotExist,
+ }
+ }
+ if ent.Type != "reg" {
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: errors.New("not a regular file"),
+ }
+ }
+ return &fileReader{
+ r: r,
+ size: ent.Size,
+ ents: r.getChunks(ent),
+ }, nil
+}
+
+// OpenFileWithPreReader returns the reader of the specified file payload, like OpenFile.
+// Chunks that share a compressed stream with the target are passed to preRead as they are decompressed.
+func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ fr.preRead = preRead
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
+ if ents, ok := r.chunks[ent.Name]; ok {
+ return ents
+ }
+ return []*TOCEntry{ent}
+}
+
+type fileReader struct {
+ r *Reader
+ size int64
+ ents []*TOCEntry // 1 or more reg/chunk entries
+ preRead func(*TOCEntry, io.Reader) error
+}
+
+func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
+ if off >= fr.size {
+ return 0, io.EOF
+ }
+ if off < 0 {
+ return 0, errors.New("invalid offset")
+ }
+ var i int
+ if len(fr.ents) > 1 {
+ i = sort.Search(len(fr.ents), func(i int) bool {
+ return fr.ents[i].ChunkOffset >= off
+ })
+ if i == len(fr.ents) {
+ i = len(fr.ents) - 1
+ }
+ }
+ ent := fr.ents[i]
+ if ent.ChunkOffset > off {
+ if i == 0 {
+ return 0, errors.New("internal error; first chunk offset is non-zero")
+ }
+ ent = fr.ents[i-1]
+ }
+
+ // If ent is a chunk of a large file, adjust the ReadAt
+ // offset by the chunk's offset.
+ off -= ent.ChunkOffset
+
+ finalEnt := fr.ents[len(fr.ents)-1]
+ compressedOff := ent.Offset
+ // compressedBytesRemain is the number of compressed bytes in this
+ // file remaining, over 1+ chunks.
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff
+
+ sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain)
+
+ const maxRead = 2 << 20
+ var bufSize = maxRead
+ if compressedBytesRemain < maxRead {
+ bufSize = int(compressedBytesRemain)
+ }
+
+ br := bufio.NewReaderSize(sr, bufSize)
+ if _, err := br.Peek(bufSize); err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
+ }
+
+ dr, err := fr.r.decompressor.Reader(br)
+ if err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
+ }
+ defer dr.Close()
+
+ if fr.preRead == nil {
+ if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
+ return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
+ }
+ return io.ReadFull(dr, p)
+ }
+
+ var retN int
+ var retErr error
+ var found bool
+ var nr int64
+ for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
+ if !e.isDataType() {
+ continue
+ }
+ if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
+ break
+ }
+ if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+ return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+ }
+ nr = e.InnerOffset
+ if e == ent {
+ found = true
+ if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+ return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+ }
+ retN, retErr = io.ReadFull(dr, p)
+ nr += off + int64(retN)
+ continue
+ }
+ cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+ if err := fr.preRead(e, cr); err != nil {
+ return 0, fmt.Errorf("failed to pre read: %w", err)
+ }
+ nr += cr.n
+ }
+ if !found {
+ return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
+ }
+ return retN, retErr
+}
+
+// A Writer writes stargz files.
+//
+// Use NewWriter to create a new Writer.
+type Writer struct {
+ bw *bufio.Writer
+ cw *countWriter
+ toc *JTOC
+ diffHash hash.Hash // SHA-256 of uncompressed tar
+
+ closed bool
+ gz io.WriteCloser
+ lastUsername map[int]string
+ lastGroupname map[int]string
+ compressor Compressor
+
+ uncompressedCounter *countWriteFlusher
+
+ // ChunkSize optionally controls the maximum number of bytes
+ // of data of a regular file that can be written in one gzip
+ // stream before a new gzip stream is started.
+ // Zero means to use a default, currently 4 MiB.
+ ChunkSize int
+
+ // MinChunkSize optionally controls the minimum number of bytes
+ // of data that must be written in one gzip stream before a new
+ // gzip stream is started.
+ // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+ MinChunkSize int
+
+ needsOpenGzEntries map[string]struct{}
+}
+
+// currentCompressionWriter writes to the current w.gz field, which can
+// change throughout writing a tar entry.
+//
+// Additionally, it updates w's SHA-256 of the uncompressed bytes
+// of the tar file.
+type currentCompressionWriter struct{ w *Writer }
+
+func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
+ ccw.w.diffHash.Write(p)
+ if ccw.w.gz == nil {
+ if err := ccw.w.condOpenGz(); err != nil {
+ return 0, err
+ }
+ }
+ return ccw.w.gz.Write(p)
+}
+
+func (w *Writer) chunkSize() int {
+ if w.ChunkSize <= 0 {
+ return 4 << 20
+ }
+ return w.ChunkSize
+}
+
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+ footerSize := c.FooterSize()
+ if sr.Size() < footerSize {
+ return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+ }
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ return nil, err
+ }
+ blobPayloadSize, _, _, err := c.ParseFooter(footer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse footer: %w", err)
+ }
+ if blobPayloadSize < 0 {
+ blobPayloadSize = sr.Size()
+ }
+ return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterLevel(w, gzip.BestCompression)
+}
+
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
+// The compression level is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+ return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+ bw := bufio.NewWriter(w)
+ cw := &countWriter{w: bw}
+ return &Writer{
+ bw: bw,
+ cw: cw,
+ toc: &JTOC{Version: 1},
+ diffHash: sha256.New(),
+ compressor: c,
+ uncompressedCounter: &countWriteFlusher{},
+ }
+}
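+
+// Example (sketch): convert a plain tar stream into an eStargz blob; in and
+// out are assumed to be an io.Reader and io.Writer supplied by the caller.
+//
+//	w := NewWriter(out)
+//	if err := w.AppendTar(in); err != nil { ... }
+//	tocDigest, err := w.Close()
+//	diffID := w.DiffID() // only valid after Close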
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+ if w.closed {
+ return "", nil
+ }
+ defer func() { w.closed = true }()
+
+ if err := w.closeGz(); err != nil {
+ return "", err
+ }
+
+ // Write the TOC index and footer.
+ tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+ if err != nil {
+ return "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return "", err
+ }
+
+ return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+ if w.closed {
+ return errors.New("write on closed Writer")
+ }
+ if w.gz != nil {
+ if err := w.gz.Close(); err != nil {
+ return err
+ }
+ w.gz = nil
+ }
+ return nil
+}
+
+func (w *Writer) flushGz() error {
+ if w.closed {
+ return errors.New("flush on closed Writer")
+ }
+ if w.gz != nil {
+ if f, ok := w.gz.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ }
+ return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+ if name == "" {
+ return ""
+ }
+ if *mp == nil {
+ *mp = make(map[int]string)
+ }
+ if (*mp)[id] == name {
+ return ""
+ }
+ (*mp)[id] = name
+ return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+ if w.gz == nil {
+ w.gz, err = w.compressor.Writer(w.cw)
+ if w.gz != nil {
+ w.gz = w.uncompressedCounter.register(w.gz)
+ }
+ }
+ return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+ return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot replace it with the TOC JSON generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+ return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+ var src io.Reader
+ br := bufio.NewReader(r)
+ if isGzip(br) {
+ zr, _ := gzip.NewReader(br)
+ src = zr
+ } else {
+ src = io.Reader(br)
+ }
+ dst := currentCompressionWriter{w}
+ var tw *tar.Writer
+ if !lossless {
+ tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+ }
+ tr := tar.NewReader(src)
+ if lossless {
+ tr.RawAccounting = true
+ }
+ prevOffset := w.cw.n
+ var prevOffsetUncompressed int64
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ if lossless {
+ if remain := tr.RawBytes(); len(remain) > 0 {
+ // Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+ if _, err := dst.Write(remain); err != nil {
+ return err
+ }
+ }
+ }
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+ }
+ if cleanEntryName(h.Name) == TOCTarName {
+ // It is possible for a layer to be "stargzified" twice during the
+ // distribution lifecycle. So we reserve "TOCTarName" here to avoid
+ // duplicated entries in the resulting layer.
+ if lossless {
+ // We cannot handle this in lossless way.
+ return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+ }
+ continue
+ }
+
+ xattrs := make(map[string][]byte)
+ const xattrPAXRecordsPrefix = "SCHILY.xattr."
+ if h.PAXRecords != nil {
+ for k, v := range h.PAXRecords {
+ if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
+ xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
+ }
+ }
+ }
+ ent := &TOCEntry{
+ Name: h.Name,
+ Mode: h.Mode,
+ UID: h.Uid,
+ GID: h.Gid,
+ Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
+ Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
+ ModTime3339: formatModtime(h.ModTime),
+ Xattrs: xattrs,
+ }
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+ if tw != nil {
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ } else {
+ if _, err := dst.Write(tr.RawBytes()); err != nil {
+ return err
+ }
+ }
+ switch h.Typeflag {
+ case tar.TypeLink:
+ ent.Type = "hardlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeSymlink:
+ ent.Type = "symlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeDir:
+ ent.Type = "dir"
+ case tar.TypeReg:
+ ent.Type = "reg"
+ ent.Size = h.Size
+ case tar.TypeChar:
+ ent.Type = "char"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeBlock:
+ ent.Type = "block"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeFifo:
+ ent.Type = "fifo"
+ default:
+ return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
+ }
+
+ // We need to keep a reference to the TOC entry for regular files, so that we
+ // can fill the digest later.
+ var regFileEntry *TOCEntry
+ var payloadDigest digest.Digester
+ if h.Typeflag == tar.TypeReg {
+ regFileEntry = ent
+ payloadDigest = digest.Canonical.Digester()
+ }
+
+ if h.Typeflag == tar.TypeReg && ent.Size > 0 {
+ var written int64
+ totalSize := ent.Size // save it before we destroy ent
+ tee := io.TeeReader(tr, payloadDigest.Hash())
+ for written < totalSize {
+ chunkSize := int64(w.chunkSize())
+ remain := totalSize - written
+ if remain < chunkSize {
+ chunkSize = remain
+ } else {
+ ent.ChunkSize = chunkSize
+ }
+
+ // We flush the underlying compression writer here to correctly calculate "w.cw.n".
+ if err := w.flushGz(); err != nil {
+ return err
+ }
+ if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+ if err := w.closeGz(); err != nil {
+ return err
+ }
+ ent.Offset = w.cw.n
+ prevOffset = ent.Offset
+ prevOffsetUncompressed = w.uncompressedCounter.n
+ } else {
+ ent.Offset = prevOffset
+ ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+ }
+
+ ent.ChunkOffset = written
+ chunkDigest := digest.Canonical.Digester()
+
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+
+ teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+ var out io.Writer
+ if tw != nil {
+ out = tw
+ } else {
+ out = dst
+ }
+ if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+ return fmt.Errorf("error copying %q: %v", h.Name, err)
+ }
+ ent.ChunkDigest = chunkDigest.Digest().String()
+ w.toc.Entries = append(w.toc.Entries, ent)
+ written += chunkSize
+ ent = &TOCEntry{
+ Name: h.Name,
+ Type: "chunk",
+ }
+ }
+ } else {
+ w.toc.Entries = append(w.toc.Entries, ent)
+ }
+ if payloadDigest != nil {
+ regFileEntry.Digest = payloadDigest.Digest().String()
+ }
+ if tw != nil {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ }
+ }
+ remainDest := io.Discard
+ if lossless {
+ remainDest = dst // Preserve the remaining bytes in lossless mode
+ }
+ _, err := io.Copy(remainDest, src)
+ return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+ if ent.Type != "reg" {
+ return false
+ }
+ if w.needsOpenGzEntries == nil {
+ return false
+ }
+ _, ok := w.needsOpenGzEntries[ent.Name]
+ return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+ return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+ for _, d := range decompressors {
+ if s := d.FooterSize(); res < s && s <= blobSize {
+ res = s
+ }
+ }
+ return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+ if tocOff < 0 {
+ // This means that TOC isn't contained in the blob.
+ // We pass a nil reader to ParseTOC and expect that ParseTOC acquires
+ // the TOC from an external location.
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(nil)
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ if len(tocBytes) > 0 {
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err == nil {
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ }
+
+ start := time.Now()
+ tocBytes = make([]byte, tocSize)
+ if _, err := sr.ReadAt(tocBytes, tocOff); err != nil {
+ return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ start = time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+}
+
+func formatModtime(t time.Time) string {
+ if t.IsZero() || t.Unix() == 0 {
+ return ""
+ }
+ return t.UTC().Round(time.Second).Format(time.RFC3339)
+}
+
+func cleanEntryName(name string) string {
+ // Use path.Clean to consistently deal with path separators across platforms.
+ return strings.TrimPrefix(path.Clean("/"+name), "/")
+}
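+
+// For illustration, "foo/bar", "./foo/bar", "/foo/bar" and "../foo/bar" all
+// normalize to "foo/bar", while the root ("", ".", "/") normalizes to "".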
+
+// countWriter counts how many bytes have been written to its wrapped
+// io.Writer.
+type countWriter struct {
+ w io.Writer
+ n int64
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.w.Write(p)
+ cw.n += int64(n)
+ return
+}
+
+type countWriteFlusher struct {
+ io.WriteCloser
+ n int64
+}
+
+func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
+ wc.WriteCloser = w
+ return wc
+}
+
+func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
+ n, err = wc.WriteCloser.Write(p)
+ wc.n += int64(n)
+ return
+}
+
+func (wc *countWriteFlusher) Flush() error {
+ if f, ok := wc.WriteCloser.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ return nil
+}
+
+func (wc *countWriteFlusher) Close() error {
+ err := wc.WriteCloser.Close()
+ wc.WriteCloser = nil
+ return err
+}
+
+// isGzip reports whether br is positioned right before an upcoming gzip stream.
+// It does not consume any bytes from br.
+func isGzip(br *bufio.Reader) bool {
+ const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ )
+ peek, _ := br.Peek(3)
+ return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
+}
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
+
+type countReader struct {
+ r io.Reader
+ n int64
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ cr.n += int64(n)
+ return
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
new file mode 100644
index 000000000..d9b5df8c7
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -0,0 +1,10 @@
+module github.com/containerd/stargz-snapshotter/estargz
+
+go 1.19
+
+require (
+ github.com/klauspost/compress v1.17.2
+ github.com/opencontainers/go-digest v1.0.0
+ github.com/vbatts/tar-split v0.11.5
+ golang.org/x/sync v0.4.0
+)
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
new file mode 100644
index 000000000..fc8c93014
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -0,0 +1,8 @@
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
new file mode 100644
index 000000000..f24afe32f
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -0,0 +1,237 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type gzipCompression struct {
+ *GzipCompressor
+ *GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+ return &gzipCompression{
+ &GzipCompressor{level},
+ &GzipDecompressor{},
+ }
+}
+
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
+ compressionLevel int
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+ gw := io.Writer(gz)
+ if diffHash != nil {
+ gw = io.MultiWriter(gz, diffHash)
+ }
+ tw := tar.NewWriter(gw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51-byte footer.
+func gzipFooterBytes(tocOff int64) []byte {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+ // Extra header indicating the offset of TOCJSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
+ gz.Close()
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes()
+}
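+
+// For reference, the footer produced above is a complete, empty gzip member
+// laid out as:
+//
+//	[gzip header]['S' 'G'][2-byte LE subfield length]["%016x" TOC offset]["STARGZ"][empty body][gzip trailer]
+//
+// GzipDecompressor.ParseFooter below reverses this encoding.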
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) < 4 {
+ return 0, 0, 0, fmt.Errorf("invalid footer: extra field too short; %d < 4", len(extra))
+ }
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != legacyFooterSize {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) != 16+len("STARGZ") {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ }
+ if string(extra[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ }
+ tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+ return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != TOCTarName {
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ }
+ return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 000000000..0ca6fd75f
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+ Compression
+ TestStreams(t *testing.T, b []byte, streams []int64)
+ DiffIDOf(*testing.T, []byte) string
+ String() string
+}
+
+// CompressionTestSuite tests that this pkg, together with the given controllers, can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+ t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+ t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
+
+type TestingControllerFactory func() TestingController
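+
+// Example (sketch): an external Compression implementation can reuse this
+// suite from its own tests; myController is a hypothetical implementation.
+//
+//	func TestMyCompression(t *testing.T) {
+//		CompressionTestSuite(t, func() TestingController { return &myController{} })
+//	}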
+
+const (
+ uncompressedType int = iota
+ gzipType
+ zstdType
+)
+
+var srcCompressions = []int{
+ uncompressedType,
+ gzipType,
+ zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize []int
+ in []tarEntry
+ }{
+ {
+ name: "regfiles and directories",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "test1"),
+ dir("foo2/"),
+ file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+ ),
+ },
+ {
+ name: "empty files",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "tttttt"),
+ file("foo_empty", ""),
+ file("foo2", "tttttt"),
+ file("foo_empty2", ""),
+ file("foo3", "tttttt"),
+ file("foo_empty3", ""),
+ file("foo4", "tttttt"),
+ file("foo_empty4", ""),
+ file("foo5", "tttttt"),
+ file("foo_empty5", ""),
+ file("foo6", "tttttt"),
+ ),
+ },
+ {
+ name: "various files",
+ chunkSize: 4,
+ minChunkSize: []int{0, 64000},
+ in: tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo1.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ {
+ name: "no contents",
+ chunkSize: 4,
+ in: tarOf(
+ file("baz.txt", ""),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ }
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+ tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+ // Test divideEntries()
+ entries, err := sortEntries(tarBlob, nil, nil) // identical order
+ if err != nil {
+ t.Fatalf("failed to parse tar: %v", err)
+ }
+ var merged []*entry
+ for _, part := range divideEntries(entries, 4) {
+ merged = append(merged, part...)
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
+ }
+
+ // Prepare sample data
+ cl1 := newCL()
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl1)
+ sw.MinChunkSize = minChunkSize
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl1),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
+
+ // Prepare testing data
+ var opts []Option
+ if minChunkSize > 0 {
+ opts = append(opts, WithMinChunkSize(minChunkSize))
+ }
+ cl2 := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl2),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
+
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl2.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ // Compare as stargz
+ if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz hasn't same json")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't same as the original")
+ return
+ }
+
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz isn't same tar.gz")
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aGz, err := cla.Reader(bytes.NewReader(a))
+ if err != nil {
+ t.Fatalf("failed to read A")
+ }
+ defer aGz.Close()
+ bGz, err := clb.Reader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("failed to read B")
+ }
+ defer bGz.Close()
+
+ // Same as tar's Next() method but ignores landmarks and TOCJSON file
+ next := func(r *tar.Reader) (h *tar.Header, err error) {
+ for {
+ if h, err = r.Next(); err != nil {
+ return
+ }
+ if h.Name != PrefetchLandmark &&
+ h.Name != NoPrefetchLandmark &&
+ h.Name != TOCTarName {
+ return
+ }
+ }
+ }
+
+ aTar := tar.NewReader(aGz)
+ bTar := tar.NewReader(bGz)
+ for {
+ // Fetch and parse next header.
+ aH, aErr := next(aTar)
+ bH, bErr := next(bTar)
+ if aErr != nil || bErr != nil {
+ if aErr == io.EOF && bErr == io.EOF {
+ break
+ }
+ t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+ }
+ if !reflect.DeepEqual(aH, bH) {
+ t.Logf("different header (A = %v; B = %v)", aH, bH)
+ return false
+ }
+ aFile, err := io.ReadAll(aTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of A")
+ }
+ bFile, err := io.ReadAll(bTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of B")
+ }
+ if !bytes.Equal(aFile, bFile) {
+ t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b))
+ return false
+ }
+ }
+
+ return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+ if err != nil {
+ t.Fatalf("failed to parse A: %v", err)
+ }
+ bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+ if err != nil {
+ t.Fatalf("failed to parse B: %v", err)
+ }
+ t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+ t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+ return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+ aroot, ok := a.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of A")
+ }
+ broot, ok := b.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of B")
+ }
+ aEntry := stargzEntry{aroot, a}
+ bEntry := stargzEntry{broot, b}
+ return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
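+// compressBlob compresses the source tar with the specified algorithm
+// (gzipType or zstdType) and returns it as a SectionReader; any other value
+// returns the source uncompressed.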
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+ buf := new(bytes.Buffer)
+ var w io.WriteCloser
+ var err error
+ if srcCompression == gzipType {
+ w = gzip.NewWriter(buf)
+ } else if srcCompression == zstdType {
+ w, err = zstd.NewWriter(buf)
+ if err != nil {
+ t.Fatalf("failed to init zstd writer: %v", err)
+ }
+ } else {
+ return src
+ }
+ src.Seek(0, io.SeekStart)
+ if _, err := io.Copy(w, src); err != nil {
+ t.Fatalf("failed to compress source")
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("failed to finalize compress source")
+ }
+ data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+ e *TOCEntry
+ r *Reader
+}
+
+// contains checks that every child entry of "a" is also contained in "b".
+// This function also checks that the corresponding files/chunks in "a" and "b" hold the same contents.
+func contains(t *testing.T, a, b stargzEntry) bool {
+ ae, ar := a.e, a.r
+ be, br := b.e, b.r
+ t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+ if !equalEntry(ae, be) {
+ t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+ return false
+ }
+ if ae.Type == "dir" {
+ t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+ allChildrenName(ae), allChildrenName(be))
+ iscontain := true
+ ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+ // Walk through all files on this stargz file.
+
+ if aChild.Name == PrefetchLandmark ||
+ aChild.Name == NoPrefetchLandmark {
+ return true // Ignore landmarks
+ }
+
+			// Ignore a TOCEntry of "./" (formatted as "" by the stargz lib) on the root
+			// directory because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset >= 0) == (b.Offset >= 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ // ChunkSize int64 `json:"chunkSize,omitempty"`
+ // children map[string]*TOCEntry
+ a.Digest == b.Digest
+}
+
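+// readOffset reads the chunk of entry e that covers the given offset and
+// returns the chunk data, the offset of the next chunk, and whether a chunk
+// was found at the offset.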
+func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+ ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
+ if !ok {
+ return nil, 0, false
+ }
+ data := make([]byte, ce.ChunkSize)
+ t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset())
+ n, err := r.ReadAt(data, ce.ChunkOffset)
+ if err != nil {
+ t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v",
+ e.e.Name, ce.ChunkOffset, ce.ChunkSize, err)
+ }
+ if int64(n) != ce.ChunkSize {
+ t.Fatalf("unexpected copied data size %d; want %d",
+ n, ce.ChunkSize)
+ }
+ return data[:n], offset + ce.ChunkSize, true
+}
+
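+// dumpTOCJSON marshals the TOC into its JSON representation for logging.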
+func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+ jtocData, err := json.Marshal(*tocJSON)
+ if err != nil {
+ t.Fatalf("failed to marshal TOC JSON: %v", err)
+ }
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil {
+ t.Fatalf("failed to read toc json blob: %v", err)
+ }
+ return buf.String()
+}
+
+const chunkSize = 3
+
+type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+
+// testDigestAndVerify runs specified checks against sample stargz blobs.
+func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+ checks []check
+ minChunkSize []int
+ }{
+ {
+ name: "no-regfile",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ dir("test/"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ dir("test2/"), // modified
+ ), allowedPrefix[0])),
+ },
+ },
+ {
+ name: "small-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "bbb", dgstMap),
+ )
+ },
+ minChunkSize: []int{0, 64000},
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", ""),
+ file("foo.txt", "M"), // modified
+ dir("test/"),
+ file("test/bar.txt", "bbb"),
+ ), allowedPrefix[0])),
+ // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO
+ checkVerifyBrokenContentFail("foo.txt"),
+ },
+ },
+ {
+ name: "big-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
+ file("foo.txt", "a"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ {
+ name: "with-non-regfiles",
+ minChunkSize: []int{0, 64000},
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ regDigest(t, "bar/foo2.txt", "b", dgstMap),
+ regDigest(t, "foo3.txt", "c", dgstMap),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ dir("test2/"),
+ link("test2/bazlink", "foo.txt"), // modified
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ cl := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+							// NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ digestMapTOC, err := listDigests(io.NewSectionReader(
+ bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ controller,
+ )
+ if err != nil {
+ t.Fatalf("failed to list digest: %v", err)
+ }
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+			// Check the digest of the TOC JSON based on the actual contents.
+			// The TOC JSON is guaranteed to exist in this archive because
+			// Open succeeded.
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest())
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar, during the whole conversion steps.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that the verification works for the TOC JSON of the
+// passed stargz. It walks all entries in the stargz and checks that the
+// verification succeeds for all chunks.
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+				t.Errorf("Invalid contents in converted stargz %q (verification should succeed)",
+					h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ cl := newController()
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
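+// chunkID returns a key that uniquely identifies a chunk by its entry name,
+// offset, and size.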
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
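+// rewriteTOCJSON applies the rewrite function to the TOC JSON of sgz and
+// returns the reconstructed blob along with the digest of the rewritten TOC.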
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
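+// listDigests returns the chunk digests recorded in the TOC JSON, keyed by
+// the chunk's offset in the blob. Empty regular files are skipped.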
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignores empty file
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
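+// parseStargz reads the footer of sgz to locate the TOC and returns the
+// decoded TOC JSON together with its offset in the blob.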
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
+ return nil, 0, fmt.Errorf("error reading footer: %w", err)
+ }
+ _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
+ }
+
+ // Decode the TOC JSON
+ var tocReader io.Reader
+ if tocOffset >= 0 {
+ tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ }
+ decodedJTOC, _, err = controller.ParseTOC(tocReader)
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
+ }
+ return decodedJTOC, tocOffset, nil
+}
+
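+// testWriteAndOpen checks that written stargz files look good and can be
+// parsed back, verifying the TOC, the number of compression streams, and the
+// entry contents for each test case.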
+func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+ const content = "Some contents"
+ invalidUtf8 := "\xff\xfe\xfd"
+
+ xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
+ sampleOwner := owner{uid: 50, gid: 100}
+
+ data64KB := randomContents(64000)
+
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tarEntry
+ want []stargzCheck
+ wantNumGz int // expected number of streams
+
+ wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+ wantFailOnLossLess bool
+ wantTOCVersion int // default = 1
+ }{
+ {
+ name: "empty",
+ in: tarOf(),
+ wantNumGz: 2, // (empty tar) + TOC + footer
+ want: checks(
+ numTOCEntries(0),
+ ),
+ },
+ {
+ name: "1dir_1empty_file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", ""),
+ ),
+ wantNumGz: 3, // dir, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", 0),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileDigest("foo/bar.txt", digestFor("")),
+ ),
+ },
+ {
+ name: "1dir_1file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", content, xAttrFile),
+ ),
+			wantNumGz: 4, // dir, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ hasFileDigest("foo/bar.txt", digestFor(content)),
+ hasFileContentsRange("foo/bar.txt", 0, content),
+ hasFileContentsRange("foo/bar.txt", 1, content[1:]),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileXattrs("foo/bar.txt", "foo", "bar"),
+ hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8),
+ ),
+ },
+ {
+ name: "2meta_2file",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ },
+ {
+ name: "3dir",
+ in: tarOf(
+ dir("bar/"),
+ dir("foo/"),
+ dir("foo/bar/"),
+ ),
+ wantNumGz: 3, // 3 dirs, TOC, footer
+ want: checks(
+ hasDirLinkCount("bar/", 2),
+ hasDirLinkCount("foo/", 3),
+ hasDirLinkCount("foo/bar/", 2),
+ ),
+ },
+ {
+ name: "symlink",
+ in: tarOf(
+ dir("foo/"),
+ symlink("foo/bar", "../../x"),
+ ),
+ wantNumGz: 3, // metas + TOC + footer
+ want: checks(
+ numTOCEntries(2),
+ hasSymlink("foo/bar", "../../x"),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar"),
+ ),
+ },
+ {
+ name: "chunked_file",
+ chunkSize: 4,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
+ ),
+ wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
+ want: checks(
+ numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
+ hasDir("foo/"),
+ hasFileLen("foo/big.txt", len("This is such a big file")),
+ hasFileDigest("foo/big.txt", digestFor("This is such a big file")),
+ hasFileContentsRange("foo/big.txt", 0, "This is such a big file"),
+ hasFileContentsRange("foo/big.txt", 1, "his is such a big file"),
+ hasFileContentsRange("foo/big.txt", 2, "is is such a big file"),
+ hasFileContentsRange("foo/big.txt", 3, "s is such a big file"),
+ hasFileContentsRange("foo/big.txt", 4, " is such a big file"),
+ hasFileContentsRange("foo/big.txt", 5, "is such a big file"),
+ hasFileContentsRange("foo/big.txt", 6, "s such a big file"),
+ hasFileContentsRange("foo/big.txt", 7, " such a big file"),
+ hasFileContentsRange("foo/big.txt", 8, "such a big file"),
+ hasFileContentsRange("foo/big.txt", 9, "uch a big file"),
+ hasFileContentsRange("foo/big.txt", 10, "ch a big file"),
+ hasFileContentsRange("foo/big.txt", 11, "h a big file"),
+ hasFileContentsRange("foo/big.txt", 12, " a big file"),
+ hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""),
+ hasChunkEntries("foo/big.txt", 6),
+ ),
+ },
+ {
+ name: "recursive",
+ in: tarOf(
+ dir("/", sampleOwner),
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // dirs, bar.txt alone, TOC, footer
+ want: checks(
+ maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt"
+ ),
+ },
+ {
+ name: "block_char_fifo",
+ in: tarOf(
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "b",
+ Typeflag: tar.TypeBlock,
+ Devmajor: 123,
+ Devminor: 456,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "c",
+ Typeflag: tar.TypeChar,
+ Devmajor: 111,
+ Devminor: 222,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "f",
+ Typeflag: tar.TypeFifo,
+ Format: format,
+ })
+ }),
+ ),
+ wantNumGz: 3,
+ want: checks(
+ lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}),
+ lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}),
+ lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}),
+ ),
+ },
+ {
+ name: "modes",
+ in: tarOf(
+ dir("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ file("foo1/bar1", content, 0700|os.ModeSetuid),
+ file("foo1/bar2", content, 0755|os.ModeSetgid),
+ dir("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ file("foo2/bar3", content, 0755|os.ModeSticky),
+ dir("foo3/", 0755|os.ModeDir),
+ file("foo3/bar4", content, os.FileMode(0700)),
+ file("foo3/bar5", content, os.FileMode(0755)),
+ ),
+ wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer
+ want: checks(
+ hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ hasMode("foo1/bar1", 0700|os.ModeSetuid),
+ hasMode("foo1/bar2", 0755|os.ModeSetgid),
+ hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ hasMode("foo2/bar3", 0755|os.ModeSticky),
+ hasMode("foo3/", 0755|os.ModeDir),
+ hasMode("foo3/bar4", os.FileMode(0700)),
+ hasMode("foo3/bar5", os.FileMode(0755)),
+ ),
+ },
+ {
+ name: "lossy",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+ ),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ {
+			name: "hardlink should be replaced by the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ file("foo22", "ccc"),
+ dir("bar/"),
+ file("bar/bar.txt", "aaa"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo22", len("ccc")),
+ hasFileLen("bar/bar.txt", len("aaa")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo22", digestFor("ccc")),
+ hasFileDigest("bar/bar.txt", digestFor("aaa")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo22", 0, "ccc"),
+ hasFileContentsRange("foo22", 1, "cc"),
+ hasFileContentsRange("foo22", 2, "c"),
+ hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+ hasFileContentsRange("bar/bar.txt", 1, "aa"),
+ hasFileContentsRange("bar/bar.txt", 2, "a"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ dir("bar/"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+ hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ }
+
+ for _, tt := range tests {
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ cl1 := newCL()
+ w := NewWriterWithCompressor(&stargzBuf, cl1)
+ w.ChunkSize = tt.chunkSize
+ w.MinChunkSize = tt.minChunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+									t.Fatalf("AppendTarLossLess should have failed")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+								// Check if the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl1.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+ r, err := Open(
+ sr,
+ WithDecompressors(cl1),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ wantTOCVersion := 1
+ if tt.wantTOCVersion > 0 {
+ wantTOCVersion = tt.wantTOCVersion
+ }
+ if r.toc.Version != wantTOCVersion {
+ t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+ }
+
+ footerSize := cl1.FooterSize()
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ t.Errorf("failed to read footer: %v", err)
+ }
+ _, tocOffset, _, err := cl1.ParseFooter(footer)
+ if err != nil {
+ t.Errorf("failed to parse footer: %v", err)
+ }
+ if err := checkCalled(tocOffset >= 0); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
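+							// Count the compression streams recorded in the TOC: each
+							// increase of the entry offset marks the start of a new
+							// stream. The TOC and the footer are counted separately
+							// below because they aren't regular TOC entries.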
+ streamOffsets := []int64{0}
+ prevOffset := int64(-1)
+ streams := 0
+ for _, e := range r.toc.Entries {
+ if e.Offset > prevOffset {
+ streamOffsets = append(streamOffsets, e.Offset)
+ prevOffset = e.Offset
+ streams++
+ }
+ }
+ streams++ // TOC
+ if tocOffset >= 0 {
+ // toc is in the blob
+ streamOffsets = append(streamOffsets, tocOffset)
+ }
+ streams++ // footer
+ streamOffsets = append(streamOffsets, footerOffset)
+ if streams != wantNumGz {
+ t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
+ }
+
+ t.Logf("testing streams: %+v", streamOffsets)
+ cl1.TestStreams(t, b, streamOffsets)
+
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+type chunkInfo struct {
+ name string
+ data string
+}
+
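+// newCalledTelemetry returns a Telemetry whose hooks record that they were
+// invoked, plus a check function that reports which expected hooks were
+// never called.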
+func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func(needsGetTOC bool) error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ }
+ if needsGetTOC {
+ if !getTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+ }
+ }
+ if !deserializeTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
+ }
+ return errorutil.Aggregate(allErr)
+ }
+}
+
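+// digestFor returns the sha256 digest string of content in the same format
+// as the digests recorded in the TOC.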
+func digestFor(content string) string {
+ sum := sha256.Sum256([]byte(content))
+ return fmt.Sprintf("sha256:%x", sum)
+}
+
+type numTOCEntries int
+
+func (n numTOCEntries) check(t *testing.T, r *Reader) {
+ if r.toc == nil {
+ t.Fatal("nil TOC")
+ }
+ if got, want := len(r.toc.Entries), int(n); got != want {
+ t.Errorf("got %d TOC entries; want %d", got, want)
+ }
+ t.Logf("got TOC entries:")
+ for i, ent := range r.toc.Entries {
+ entj, _ := json.Marshal(ent)
+ t.Logf(" [%d]: %s\n", i, entj)
+ }
+ if t.Failed() {
+ t.FailNow()
+ }
+}
+
+func checks(s ...stargzCheck) []stargzCheck { return s }
+
+type stargzCheck interface {
+ check(t *testing.T, r *Reader)
+}
+
+type stargzCheckFn func(*testing.T, *Reader)
+
+func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+
+func maxDepth(max int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup("")
+ if !ok {
+ t.Fatal("root directory not found")
+ }
+ d, err := getMaxDepth(t, e, 0, 10*max)
+ if err != nil {
+ t.Errorf("failed to get max depth (wanted %d): %v", max, err)
+ return
+ }
+ if d != max {
+ t.Errorf("invalid depth %d; want %d", d, max)
+ return
+ }
+ })
+}
+
+func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+ if current > limit {
+ return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
+ current, limit)
+ }
+ max = current
+ e.ForeachChild(func(baseName string, ent *TOCEntry) bool {
+ t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name)
+ d, err := getMaxDepth(t, ent, current+1, limit)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if d > max {
+ max = d
+ }
+ return true
+ })
+ return
+}
+
+func hasFileLen(file string, wantLen int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ } else if ent.Size != int64(wantLen) {
+ t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen)
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileXattrs(file, name, value string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ }
+ if ent.Xattrs == nil {
+ t.Errorf("file %q has no xattrs", file)
+ return
+ }
+ valueFound, found := ent.Xattrs[name]
+ if !found {
+ t.Errorf("file %q has no xattr %q", file, name)
+ return
+ }
+ if string(valueFound) != value {
+ t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value)
+ }
+
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileDigest(file string, digest string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for file %q", file)
+ }
+ if ent.Digest != digest {
+ t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest)
+ }
+ })
+}
+
+func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ extraMap := make(map[string]chunkInfo)
+ for _, e := range extra {
+ extraMap[e.name] = e
+ }
+ var extraNames []string
+ for n := range extraMap {
+ extraNames = append(extraNames, n)
+ }
+ f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
+ t.Logf("On %q: got preread of %q", file, e.Name)
+ ex, ok := extraMap[e.Name]
+ if !ok {
+ t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
+ }
+ got, err := io.ReadAll(cr)
+ if err != nil {
+ t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
+ }
+ if ex.data != string(got) {
+ t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
+ }
+ delete(extraMap, e.Name)
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ if len(extraMap) != 0 {
+ var exNames []string
+ for _, ex := range extraMap {
+ exNames = append(exNames, ex.name)
+ }
+ t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
+ }
+ })
+}
+
+func hasFileContentsRange(file string, offset int, want string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ f, err := r.OpenFile(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ })
+}
+
+func hasChunkEntries(file string, wantChunks int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("no file for %q", file)
+ }
+ if ent.Type != "reg" {
+ t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type)
+ }
+ chunks := r.getChunks(ent)
+ if len(chunks) != wantChunks {
+ t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks)
+ return
+ }
+ f := chunks[0]
+
+ var gotChunks []*TOCEntry
+ var last *TOCEntry
+ for off := int64(0); off < f.Size; off++ {
+ e, ok := r.ChunkEntryForOffset(file, off)
+ if !ok {
+ t.Errorf("no ChunkEntryForOffset at %d", off)
+ return
+ }
+ if last != e {
+ gotChunks = append(gotChunks, e)
+ last = e
+ }
+ }
+ if !reflect.DeepEqual(chunks, gotChunks) {
+ t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks)
+ }
+
+ // And verify the NextOffset
+ for i := 0; i < len(gotChunks)-1; i++ {
+ ci := gotChunks[i]
+ cnext := gotChunks[i+1]
+ if ci.NextOffset() != cnext.Offset {
+ t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset)
+ }
+ }
+ })
+}
+
+func entryHasChildren(dir string, want ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ want := append([]string(nil), want...)
+ var got []string
+ ent, ok := r.Lookup(dir)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for dir node %q", dir)
+ }
+ for baseName := range ent.children {
+ got = append(got, baseName)
+ }
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("children of %q = %q; want %q", dir, got, want)
+ }
+ })
+}
+
+func hasDir(file string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasDirLinkCount(file string, count int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ return
+ }
+ if ent.NumLink != count {
+ t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasMode(file string, mode os.FileMode) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Stat().Mode() != mode {
+ t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode)
+ return
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasSymlink(file, target string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "symlink" {
+ t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type)
+ } else if ent.LinkName != target {
+ t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target)
+ }
+ return
+ }
+ }
+ t.Errorf("symlink %q not found", file)
+ })
+}
+
+func lookupMatch(name string, want *TOCEntry) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup(name)
+ if !ok {
+ t.Fatalf("failed to Lookup entry %q", name)
+ }
+ if !reflect.DeepEqual(e, want) {
+ t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want)
+ }
+	})
+}
+
+func hasEntryOwner(entry string, owner owner) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
+ if !ok {
+ t.Errorf("entry %q not found", entry)
+ return
+ }
+ if ent.UID != owner.uid || ent.GID != owner.gid {
+ t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+ return
+ }
+ })
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
+func viewContent(c []byte) string {
+ if len(c) < 100 {
+ return string(c)
+ }
+ return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+ appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+ return f(tw, prefix, format)
+}
+
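+// buildTar writes the given entries into a tar archive, using the optional
+// tar.Format, and returns it as a SectionReader.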
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+ format := tar.FormatUnknown
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case tar.Format:
+ format = v
+ default:
+ panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+ }
+ }
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, ent := range ents {
+ if err := ent.appendTar(tw, prefix, format); err != nil {
+ t.Fatalf("building input tar: %v", err)
+ }
+ }
+ if err := tw.Close(); err != nil {
+ t.Errorf("closing write of input tar: %v", err)
+ }
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode preserves them
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var o owner
+ mode := os.FileMode(0755)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if !strings.HasSuffix(name, "/") {
+			panic(fmt.Sprintf("missing trailing slash in dir %q", name))
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeDir,
+ Name: prefix + name,
+ Mode: tm,
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ })
+ })
+}
+
+// xAttr represents extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories with the file and dir functions.
+type owner struct {
+ uid int
+ gid int
+}
+
+func file(name, contents string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var xattrs xAttr
+ var o owner
+ mode := os.FileMode(0644)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case xAttr:
+ xattrs = v
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if strings.HasSuffix(name, "/") {
+ return fmt.Errorf("bogus trailing slash in file %q", name)
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ if len(xattrs) > 0 {
+ format = tar.FormatPAX // only PAX supports xattrs
+ }
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Mode: tm,
+ Xattrs: xattrs,
+ Size: int64(len(contents)),
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ _, err = io.WriteString(tw, contents)
+ return err
+ })
+}
+
+func symlink(name, target string) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeSymlink,
+ Name: prefix + name,
+ Linkname: target,
+ Mode: 0644,
+ Format: format,
+ })
+ })
+}
+
+func link(name string, linkname string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeLink,
+ Name: prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func chardev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeChar,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func blockdev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeBlock,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func fifo(name string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeFifo,
+ Name: prefix + name,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func prefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func noPrefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+ if digestMap == nil {
+ t.Fatalf("digest map mustn't be nil")
+ }
+ content := []byte(contentStr)
+
+ var n int64
+ for n < int64(len(content)) {
+ size := int64(chunkSize)
+ remain := int64(len(content)) - n
+ if remain < size {
+ size = remain
+ }
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil {
+ t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)",
+ string(content[n:n+size]), name, n, size)
+ }
+ digestMap[chunkID(name, n, size)] = dgstr.Digest()
+ n += size
+ }
+
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Size: int64(len(content)),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
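+// randomContents returns a random alphanumeric string of length n.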
+func randomContents(n int) string {
+ b := make([]rune, n)
+ for i := range b {
+ b[i] = runes[rand.Intn(len(runes))]
+ }
+ return string(b)
+}
+
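+// fileModeToTarMode converts an os.FileMode to the numeric mode field stored
+// in a tar header.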
+func fileModeToTarMode(mode os.FileMode) (int64, error) {
+ h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
+ if err != nil {
+ return 0, err
+ }
+ return h.Mode, nil
+}
+
+// fileInfoOnlyMode is an os.FileInfo implementation that populates only the file mode.
+type fileInfoOnlyMode os.FileMode
+
+func (f fileInfoOnlyMode) Name() string { return "" }
+func (f fileInfoOnlyMode) Size() int64 { return 0 }
+func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
+func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
+func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
+func (f fileInfoOnlyMode) Sys() interface{} { return nil }
+
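+// CheckGzipHasStreams decodes the blob b as a sequence of independent gzip
+// streams and logs the offset of each stream; the expected stream offsets
+// are given in streams.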
+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+ if len(streams) == 0 {
+ return // nop
+ }
+
+ wants := map[int64]struct{}{}
+ for _, s := range streams {
+ wants[s] = struct{}{}
+ }
+
+ len0 := len(b)
+ br := bytes.NewReader(b)
+ zr := new(gzip.Reader)
+ t.Logf("got gzip streams:")
+ numStreams := 0
+ for {
+ zoff := len0 - br.Len()
+ if err := zr.Reset(br); err != nil {
+ if err == io.EOF {
+ return
+ }
+ t.Fatalf("countStreams(gzip), Reset: %v", err)
+ }
+ zr.Multistream(false)
+ n, err := io.Copy(io.Discard, zr)
+ if err != nil {
+ t.Fatalf("countStreams(gzip), Copy: %v", err)
+ }
+ var extra string
+ if len(zr.Header.Extra) > 0 {
+ extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+ }
+ t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+ delete(wants, int64(zoff))
+ numStreams++
+ }
+}
+
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+ h := sha256.New()
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("diffIDOf(gzip): %v", err)
+ }
+ defer zr.Close()
+ if _, err := io.Copy(h, zr); err != nil {
+ t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+ }
+ return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
new file mode 100644
index 000000000..57e0aa614
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -0,0 +1,342 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // TOCTarName is the name of the JSON file in the tar archive in the
+ // table of contents gzip stream.
+ TOCTarName = "stargz.index.json"
+
+ // FooterSize is the number of bytes in the footer
+ //
+ // The footer is an empty gzip stream with no compression and an Extra
+ // header of the form "%016xSTARGZ", where the 64 bit hex-encoded
+ // number is the offset to the gzip stream of JSON TOC.
+ //
+ // 51 comes from:
+ //
+ // 10 bytes gzip header
+ // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ // 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ // 5 bytes flate header
+ // 8 bytes gzip footer
+ // (End of the eStargz blob)
+ //
+ // NOTE: For Extra fields, the subfield IDs SI1='S', SI2='G' are used for eStargz
+ // (see the decoding sketch after this const block).
+ FooterSize = 51
+
+ // legacyFooterSize is the number of bytes in the legacy stargz footer.
+ //
+ // 47 comes from:
+ //
+ // 10 byte gzip header +
+ // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+ // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+ // 5 byte flate header
+ // 8 byte gzip footer (two little endian uint32s: digest, size)
+ legacyFooterSize = 47
+
+ // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+ // digest of the TOC JSON.
+ // This annotation is valid only when it is specified in `.[]layers.annotations`
+ // of an image manifest.
+ TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+ // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+ // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+ // to the runtime, but the current OCI image spec doesn't ship this information by default, so we
+ // store it in this special annotation.
+ StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+ // PrefetchLandmark is a file entry which indicates the end position of
+ // prefetch in the stargz file.
+ PrefetchLandmark = ".prefetch.landmark"
+
+ // NoPrefetchLandmark is a file entry which indicates that no prefetch should
+ // occur in the stargz file.
+ NoPrefetchLandmark = ".no.prefetch.landmark"
+
+ landmarkContents = 0xf
+)
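+
+// For illustration only, a minimal sketch of decoding the TOC offset from the
+// footer layout documented on FooterSize above (parseFooterOffsetSketch is a
+// hypothetical name, not part of this package's API):
+//
+//	func parseFooterOffsetSketch(footer []byte) (int64, error) {
+//		zr, err := gzip.NewReader(bytes.NewReader(footer))
+//		if err != nil {
+//			return 0, err
+//		}
+//		defer zr.Close()
+//		extra := zr.Header.Extra // SI1='S', SI2='G', LE16 length, then "%016xSTARGZ"
+//		if len(extra) != 4+16+len("STARGZ") || extra[0] != 'S' || extra[1] != 'G' {
+//			return 0, fmt.Errorf("unexpected footer extra field")
+//		}
+//		return strconv.ParseInt(string(extra[4:4+16]), 16, 64)
+//	}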
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+ Version int `json:"version"`
+ Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+ // Name is the tar entry's name. It is the complete path
+ // stored in the tar file, not just the base name.
+ Name string `json:"name"`
+
+ // Type is one of "dir", "reg", "symlink", "hardlink", "char",
+ // "block", "fifo", or "chunk".
+ // The "chunk" type is used for regular file data chunks past the first
+ // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+ // ChunkOffset, and ChunkSize populated.
+ Type string `json:"type"`
+
+ // Size, for regular files, is the logical size of the file.
+ Size int64 `json:"size,omitempty"`
+
+ // ModTime3339 is the modification time of the tar entry. Empty
+ // means zero or unknown. Otherwise it's in UTC RFC3339
+ // format. Use the ModTime method to access the time.Time value.
+ ModTime3339 string `json:"modtime,omitempty"`
+ modTime time.Time
+
+ // LinkName, for symlinks and hardlinks, is the link target.
+ LinkName string `json:"linkName,omitempty"`
+
+ // Mode is the permission and mode bits.
+ Mode int64 `json:"mode,omitempty"`
+
+ // UID is the user ID of the owner.
+ UID int `json:"uid,omitempty"`
+
+ // GID is the group ID of the owner.
+ GID int `json:"gid,omitempty"`
+
+ // Uname is the username of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same UID.
+ Uname string `json:"userName,omitempty"`
+
+ // Gname is the group name of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same GID.
+ Gname string `json:"groupName,omitempty"`
+
+ // Offset, for regular files, provides the offset in the
+ // stargz file to the file's data bytes. See ChunkOffset and
+ // ChunkSize.
+ Offset int64 `json:"offset,omitempty"`
+
+ // InnerOffset is an optional field that indicates the uncompressed offset
+ // of this "reg" or "chunk" payload within a stream that starts at Offset.
+ // This field makes it possible to put multiple "reg" or "chunk" payloads
+ // in one chunk, sharing the same Offset but with different InnerOffset values.
+ InnerOffset int64 `json:"innerOffset,omitempty"`
+
+ nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+ // DevMajor is the major device number for "char" and "block" types.
+ DevMajor int `json:"devMajor,omitempty"`
+
+ // DevMinor is the minor device number for "char" and "block" types.
+ DevMinor int `json:"devMinor,omitempty"`
+
+ // NumLink is the number of entry names pointing to this entry.
+ // Zero means one name references this entry.
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`
+
+ // Xattrs are the extended attributes for the entry.
+ Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+ // Digest stores the OCI checksum for the regular file's payload.
+ // It has the form "sha256:abcdef01234....".
+ Digest string `json:"digest,omitempty"`
+
+ // ChunkOffset is non-zero if this is a chunk of a large,
+ // regular file. If so, the Offset is where the gzip header of
+ // ChunkSize bytes at ChunkOffset in Name begins.
+ //
+ // In serialized form, a "chunkSize" JSON field of zero means
+ // that the chunk goes to the end of the file. After reading
+ // from the stargz TOC, though, the ChunkSize is initialized
+ // to a non-zero value when Type is either "reg" or
+ // "chunk".
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+
+ // ChunkDigest stores an OCI digest of the chunk. This must be formed
+ // as "sha256:0123abcd...".
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+
+ children map[string]*TOCEntry
+
+ // chunkTopIndex is the index of the entry where Offset starts in the blob.
+ chunkTopIndex int
+}
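+
+// For illustration, a large regular file split into two chunks would appear in
+// the TOC roughly as follows (all values made up; note that the second and
+// later chunks carry only the "chunk"-related fields):
+//
+//	&TOCEntry{Name: "big.bin", Type: "reg", Size: 8 << 20,
+//		Offset: 1024, ChunkOffset: 0, ChunkSize: 4 << 20}
+//	&TOCEntry{Name: "big.bin", Type: "chunk",
+//		Offset: 2 << 20, ChunkOffset: 4 << 20, ChunkSize: 4 << 20}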
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+ if e.children == nil {
+ e.children = make(map[string]*TOCEntry)
+ }
+ if child.Type == "dir" {
+ e.NumLink++ // Entry ".." in the subdirectory links to this directory
+ }
+ e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+ for name, ent := range e.children {
+ if !f(name, ent) {
+ return
+ }
+ }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+ child, ok = e.children[baseName]
+ return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64 { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+ // TOCEntry.Mode is tar.Header.Mode, so we can interpret these bits using the `tar` package.
+ m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+ (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+ switch fi.e.Type {
+ case "dir":
+ m |= os.ModeDir
+ case "symlink":
+ m |= os.ModeSymlink
+ case "char":
+ m |= os.ModeDevice | os.ModeCharDevice
+ case "block":
+ m |= os.ModeDevice
+ case "fifo":
+ m |= os.ModeNamedPipe
+ }
+ return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+ // Verifier provides a content verifier that can be used for verifying the
+ // contents of the specified TOCEntry.
+ Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used for creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+ Compressor
+ Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+ // Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+ // Every time a chunk is written, the WriteCloser is closed and Writer is
+ // called again for writing the next chunk.
+ //
+ // The returned writer should implement a "Flush() error" method that flushes
+ // any pending compressed data to the underlying writer.
+ Writer(w io.Writer) (WriteFlushCloser, error)
+
+ // WriteTOCAndFooter is called to write the JTOC to the passed Writer.
+ // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob;
+ // WriteTOCAndFooter can optionally write anything that affects the DiffID
+ // calculation (e.g. the uncompressed TOC JSON).
+ //
+ // This function returns tocDgst that represents the digest of TOC that will be used
+ // to verify this blob when it's parsed.
+ WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns a ReadCloser to be used for decompressing the file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+ // ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from the
+ // start of the blob up to the TOC JSON).
+ //
+ // If tocOffset < 0, we assume the TOC isn't contained in the blob and pass a nil reader
+ // to ParseTOC; ParseTOC is then expected to acquire the TOC from an external location
+ // and return it.
+ //
+ // tocSize is optional. If tocSize <= 0, it defaults to the size of the range from
+ // tocOffset to the beginning of the footer (blob size - tocOffset - FooterSize).
+ // If blobPayloadSize < 0, blobPayloadSize defaults to the blob size.
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+ // ParseTOC parses the TOC from the passed reader. The reader provides the partial
+ // contents of the underlying blob in the range specified by ParseFooter.
+ //
+ // This function returns tocDgst, the digest of the TOC that will be used to verify
+ // this blob. It must match the value returned by Compressor.WriteTOCAndFooter when
+ // this blob was created.
+ //
+ // If the tocOffset returned by ParseFooter is < 0, we assume the TOC isn't contained
+ // in the blob; a nil reader is passed to ParseTOC, which is then expected to acquire
+ // the TOC from an external location and return it.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
+
+type WriteFlushCloser interface {
+ io.WriteCloser
+ Flush() error
+}
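+
+// Note, for reference: compress/gzip's *gzip.Writer already provides Write,
+// Flush() error, and Close() error, so it satisfies WriteFlushCloser as-is.
+// A minimal gzip-backed Compressor.Writer could therefore be sketched as
+// (the receiver type is hypothetical):
+//
+//	func (c gzipCompressorSketch) Writer(w io.Writer) (WriteFlushCloser, error) {
+//		return gzip.NewWriterLevel(w, gzip.BestCompression)
+//	}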
diff --git a/vendor/github.com/containerd/typeurl/v2/.gitignore b/vendor/github.com/containerd/typeurl/v2/.gitignore
new file mode 100644
index 000000000..d53846778
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/.gitignore
@@ -0,0 +1,2 @@
+*.test
+coverage.txt
diff --git a/vendor/github.com/containerd/typeurl/v2/LICENSE b/vendor/github.com/containerd/typeurl/v2/LICENSE
new file mode 100644
index 000000000..584149b6e
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md
new file mode 100644
index 000000000..8d86600a4
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/README.md
@@ -0,0 +1,20 @@
+# typeurl
+
+[PkgGoDev](https://pkg.go.dev/github.com/containerd/typeurl)
+[Build Status](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
+[codecov](https://codecov.io/gh/containerd/typeurl)
+[Go Report Card](https://goreportcard.com/report/github.com/containerd/typeurl)
+
+A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
+
+This package helps when types are sent over a ttrpc/GRPC API and marshaled as a protobuf [Any](https://pkg.go.dev/google.golang.org/protobuf@v1.27.1/types/known/anypb#Any).
+
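+A minimal usage sketch (`Foo` stands for any JSON-serializable type you have
+registered; the URL path is arbitrary):
+
+```go
+typeurl.Register(&Foo{}, "example.com/Foo")
+
+a, err := typeurl.MarshalAny(&Foo{Field: "x"})
+if err != nil {
+	panic(err)
+}
+v, err := typeurl.UnmarshalAny(a)
+if err != nil {
+	panic(err)
+}
+fmt.Println(v.(*Foo).Field) // prints "x"
+```
+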
+## Project details
+
+**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/typeurl/v2/doc.go b/vendor/github.com/containerd/typeurl/v2/doc.go
new file mode 100644
index 000000000..c0d0fd205
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/doc.go
@@ -0,0 +1,83 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package typeurl
+
+// Package typeurl assists with managing the registration, marshaling, and
+// unmarshaling of types encoded as protobuf.Any.
+//
+// A protobuf.Any is a proto message that can contain any arbitrary data. It
+// consists of two components, a TypeUrl and a Value, and its proto definition
+// looks like this:
+//
+// message Any {
+// string type_url = 1;
+// bytes value = 2;
+// }
+//
+// The TypeUrl is used to distinguish the contents from other proto.Any
+// messages. This typeurl library manages these URLs to enable automagic
+// marshaling and unmarshaling of the contents.
+//
+// For example, consider this go struct:
+//
+// type Foo struct {
+// Field1 string
+// Field2 string
+// }
+//
+// To use typeurl, types must first be registered. This is typically done in
+// the init function
+//
+// func init() {
+// typeurl.Register(&Foo{}, "Foo")
+// }
+//
+// This will register the type Foo with the url path "Foo". The arguments to
+// Register are variadic, and are used to construct a url path. Consider this
+// example, from the github.com/containerd/containerd/client package:
+//
+// func init() {
+// const prefix = "types.containerd.io"
+// // register TypeUrls for commonly marshaled external types
+// major := strconv.Itoa(specs.VersionMajor)
+// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
+// // this function has more Register calls, which are elided.
+// }
+//
+// This registers several types under a more complex url, which ends up mapping
+// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
+// value for major).
+//
+// Once a type is registered, it can be marshaled to a proto.Any message simply
+// by calling `MarshalAny`, like this:
+//
+// foo := &Foo{Field1: "value1", Field2: "value2"}
+// anyFoo, err := typeurl.MarshalAny(foo)
+//
+// MarshalAny will resolve the correct URL for the type. If the type in
+// question implements the proto.Message interface, then it will be marshaled
+// as a proto message. Otherwise, it will be marshaled as json. This means that
+// typeurl will work on any arbitrary data, whether or not it has a proto
+// definition, as long as it can be serialized to json.
+//
+// To unmarshal, the process is simply the inverse:
+//
+// iface, err := typeurl.UnmarshalAny(anyFoo)
+// foo := iface.(*Foo)
+//
+// The correct type is automatically chosen from the type registry, and the
+// returned interface can be cast straight to that type.
diff --git a/vendor/github.com/containerd/typeurl/v2/go.mod b/vendor/github.com/containerd/typeurl/v2/go.mod
new file mode 100644
index 000000000..cdf1922dc
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/go.mod
@@ -0,0 +1,8 @@
+module github.com/containerd/typeurl/v2
+
+go 1.21
+
+require (
+ github.com/gogo/protobuf v1.3.2
+ google.golang.org/protobuf v1.27.1
+)
diff --git a/vendor/github.com/containerd/typeurl/v2/go.sum b/vendor/github.com/containerd/typeurl/v2/go.sum
new file mode 100644
index 000000000..f26899198
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/go.sum
@@ -0,0 +1,38 @@
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go
new file mode 100644
index 000000000..78817b701
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/types.go
@@ -0,0 +1,298 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package typeurl
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "path"
+ "reflect"
+ "sync"
+
+ gogoproto "github.com/gogo/protobuf/proto"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+var (
+ mu sync.RWMutex
+ registry = make(map[reflect.Type]string)
+)
+
+// Definitions of common error types used throughout typeurl.
+//
+// These error types are used with errors.Wrap and errors.Wrapf to add context
+// to an error.
+//
+// To detect an error class, use the errors.Is() function to tell whether an
+// error is of this type.
+
+var (
+ ErrNotFound = errors.New("not found")
+)
+
+// Any contains an arbitrary protocol buffer message along with its type.
+//
+// While there is google.golang.org/protobuf/types/known/anypb.Any,
+// we'd like to have our own to hide the underlying protocol buffer
+// implementations from containerd clients.
+//
+// https://developers.google.com/protocol-buffers/docs/proto3#any
+type Any interface {
+ // GetTypeUrl returns a URL/resource name that uniquely identifies
+ // the type of the serialized protocol buffer message.
+ GetTypeUrl() string
+
+ // GetValue returns a valid serialized protocol buffer of the type that
+ // GetTypeUrl() indicates.
+ GetValue() []byte
+}
+
+type anyType struct {
+ typeURL string
+ value []byte
+}
+
+func (a *anyType) GetTypeUrl() string {
+ if a == nil {
+ return ""
+ }
+ return a.typeURL
+}
+
+func (a *anyType) GetValue() []byte {
+ if a == nil {
+ return nil
+ }
+ return a.value
+}
+
+// Register a type with a base URL for JSON marshaling. When the MarshalAny and
+// UnmarshalAny functions are called, they will treat the Any type value as JSON.
+// To use protocol buffers for handling the Any value, the proto.Register
+// function should be used instead of this function.
+func Register(v interface{}, args ...string) {
+ var (
+ t = tryDereference(v)
+ p = path.Join(args...)
+ )
+ mu.Lock()
+ defer mu.Unlock()
+ if et, ok := registry[t]; ok {
+ if et != p {
+ panic(fmt.Errorf("type registered with alternate path %q != %q", et, p))
+ }
+ return
+ }
+ registry[t] = p
+}
+
+// TypeURL returns the type url for a registered type.
+func TypeURL(v interface{}) (string, error) {
+ mu.RLock()
+ u, ok := registry[tryDereference(v)]
+ mu.RUnlock()
+ if !ok {
+ switch t := v.(type) {
+ case proto.Message:
+ return string(t.ProtoReflect().Descriptor().FullName()), nil
+ case gogoproto.Message:
+ return gogoproto.MessageName(t), nil
+ default:
+ return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
+ }
+ }
+ return u, nil
+}
+
+// Is returns true if the type of the Any is the same as v.
+func Is(any Any, v interface{}) bool {
+ if any == nil {
+ return false
+ }
+ // call to check that v is a pointer
+ tryDereference(v)
+ url, err := TypeURL(v)
+ if err != nil {
+ return false
+ }
+ return any.GetTypeUrl() == url
+}
+
+// MarshalAny marshals the value v into an any with the correct TypeUrl.
+// If the provided object is already a proto.Any message, then it will be
+// returned verbatim. If it is of type proto.Message, it will be marshaled as a
+// protocol buffer. Otherwise, the object will be marshaled to json.
+func MarshalAny(v interface{}) (Any, error) {
+ var marshal func(v interface{}) ([]byte, error)
+ switch t := v.(type) {
+ case Any:
+ // avoid reserializing the type if we have an any.
+ return t, nil
+ case proto.Message:
+ marshal = func(v interface{}) ([]byte, error) {
+ return proto.Marshal(t)
+ }
+ case gogoproto.Message:
+ marshal = func(v interface{}) ([]byte, error) {
+ return gogoproto.Marshal(t)
+ }
+ default:
+ marshal = json.Marshal
+ }
+
+ url, err := TypeURL(v)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return &anyType{
+ typeURL: url,
+ value: data,
+ }, nil
+}
+
+// UnmarshalAny unmarshals the any type into a concrete type.
+func UnmarshalAny(any Any) (interface{}, error) {
+ return UnmarshalByTypeURL(any.GetTypeUrl(), any.GetValue())
+}
+
+// UnmarshalByTypeURL unmarshals the given type and value into a concrete type.
+func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
+ return unmarshal(typeURL, value, nil)
+}
+
+// UnmarshalTo unmarshals the any type into a concrete type passed in the out
+// argument. It is identical to UnmarshalAny, but lets clients provide a
+// destination type through the out argument.
+func UnmarshalTo(any Any, out interface{}) error {
+ return UnmarshalToByTypeURL(any.GetTypeUrl(), any.GetValue(), out)
+}
+
+// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type passed
+// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
+// provide a destination type through the out argument.
+func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
+ _, err := unmarshal(typeURL, value, out)
+ return err
+}
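+
+// For illustration, unmarshaling into a caller-provided value (Foo is a
+// hypothetical type previously registered via Register):
+//
+//	out := &Foo{}
+//	if err := UnmarshalTo(anyFoo, out); err != nil {
+//		return err
+//	}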
+
+// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
+func MarshalProto(from Any) *anypb.Any {
+ if from == nil {
+ return nil
+ }
+
+ if pbany, ok := from.(*anypb.Any); ok {
+ return pbany
+ }
+
+ return &anypb.Any{
+ TypeUrl: from.GetTypeUrl(),
+ Value: from.GetValue(),
+ }
+}
+
+// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
+func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
+ anyType, err := MarshalAny(from)
+ if err != nil {
+ return nil, err
+ }
+ return MarshalProto(anyType), nil
+}
+
+func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
+ t, err := getTypeByUrl(typeURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if v == nil {
+ v = reflect.New(t.t).Interface()
+ } else {
+ // Validate interface type provided by client
+ vURL, err := TypeURL(v)
+ if err != nil {
+ return nil, err
+ }
+ if typeURL != vURL {
+ return nil, fmt.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
+ }
+ }
+
+ if t.isProto {
+ switch t := v.(type) {
+ case proto.Message:
+ err = proto.Unmarshal(value, t)
+ case gogoproto.Message:
+ err = gogoproto.Unmarshal(value, t)
+ }
+ } else {
+ err = json.Unmarshal(value, v)
+ }
+
+ return v, err
+}
+
+type urlType struct {
+ t reflect.Type
+ isProto bool
+}
+
+func getTypeByUrl(url string) (urlType, error) {
+ mu.RLock()
+ for t, u := range registry {
+ if u == url {
+ mu.RUnlock()
+ return urlType{
+ t: t,
+ }, nil
+ }
+ }
+ mu.RUnlock()
+ // fallback to proto registry
+ t := gogoproto.MessageType(url)
+ if t != nil {
+ return urlType{
+ // get the underlying Elem because proto returns a pointer to the type
+ t: t.Elem(),
+ isProto: true,
+ }, nil
+ }
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
+ }
+ empty := mt.New().Interface()
+ return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
+}
+
+func tryDereference(v interface{}) reflect.Type {
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Ptr {
+ // require check of pointer but dereference to register
+ return t.Elem()
+ }
+ panic("v is not a pointer to a type")
+}
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
new file mode 100644
index 000000000..8d5580d7c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -0,0 +1,187 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
+ isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+ // The copying happens through a pipeline of connected io.Readers;
+ // that pipeline is built by updating stream.
+ // === Input: srcReader
+ stream := sourceStream{
+ reader: srcReader,
+ info: srcInfo,
+ }
+
+ // === Process input through digestingReader to validate against the expected digest.
+ // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+ // use a separate validation failure indicator.
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+ // dest.PutBlob may detect that the layer already exists, in which case we don't
+ // read the stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = digestingReader
+
+ // === Update progress bars
+ stream.reader = bar.ProxyReader(stream.reader)
+
+ // === Decrypt the stream, if required.
+ decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Detect compression of the input stream.
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+ var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+ if getOriginalLayerCopyWriter != nil {
+ stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
+ originalLayerReader = stream.reader
+ }
+
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
+ // === Deal with layer compression/decompression if necessary
+ compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer compressionStep.close()
+
+ // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
+ if decryptionStep.decrypting && toEncrypt {
+ // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
+ // Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
+ return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
+ }
+ encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Report progress using the ic.c.options.Progress channel, if required.
+ if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+ progressReader := newProgressReader(
+ stream.reader,
+ ic.c.options.Progress,
+ ic.c.options.ProgressInterval,
+ srcInfo,
+ )
+ defer progressReader.reportDone()
+ stream.reader = progressReader
+ }
+
+ // === Finally, send the layer stream to dest.
+ options := private.PutBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
+ }
+ destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
+ }
+ uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)
+
+ compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
+ decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
+ if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(io.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+ }
+ if digestingReader.validationSucceeded {
+ if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+
+ return uploadedInfo, nil
+}
+
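+// As a self-contained sketch of the io.TeeReader pattern used above for
+// getOriginalLayerCopyWriter (names are illustrative, not this file's API):
+// every byte the main consumer reads is also fed to the side writer, and any
+// unread remainder must be drained explicitly so the side writer sees the
+// whole stream:
+//
+//	h := sha256.New()
+//	tee := io.TeeReader(src, h) // the side copy feeds the DiffID hash
+//	if _, err := io.Copy(dst, tee); err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(io.Discard, tee); err != nil { // drain the rest
+//		return err
+//	}
+//	diffID := fmt.Sprintf("sha256:%x", h.Sum(nil))
+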
+// sourceStream encapsulates an input consumed by copyBlobFromStream, in the process of being built.
+// This allows handlers of individual aspects to build the copy pipeline without _too much_
+// specific cooperation by the caller.
+//
+// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
+// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
+type sourceStream struct {
+ reader io.Reader
+ info types.BlobInfo // corresponding to the data available in reader.
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob in order to annotate errors that happen during read.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+ reader io.Reader
+}
+
+// Read annotates any error that happens during read.
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+ n, err = r.reader.Read(b)
+ if err != nil && err != io.EOF {
+ return n, fmt.Errorf("happened during read: %w", err)
+ }
+ return n, err
+}
+
+// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob, which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
+ // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
+ // Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+ // of the generic code in this package.
+ return types.BlobInfo{
+ Digest: uploadedBlob.Digest,
+ Size: uploadedBlob.Size,
+ URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+ Annotations: inputInfo.Annotations,
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
+ CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+ CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
new file mode 100644
index 000000000..fb5e1b174
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -0,0 +1,434 @@
+package copy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ chunkedToc "github.com/containers/storage/pkg/chunked/toc"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // defaultCompressionFormat is used if the destination transport requests
+ // compression, and the user does not explicitly instruct us to use an algorithm.
+ defaultCompressionFormat = &compression.Gzip
+
+ // compressionBufferSize is the buffer size used to compress a blob
+ compressionBufferSize = 1048576
+
+ // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed
+ // using the algorithm that the media type says it should be compressed with
+ expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{
+ imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
+ imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
+ manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
+ }
+)
+
+// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
+type bpDetectCompressionStepData struct {
+ isCompressed bool
+ format compressiontypes.Algorithm // Valid if isCompressed
+ decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
+ srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob.
+}
+
+// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
+// srcInfo is only used for error messages.
+// Returns data for other steps.
+func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = reader
+
+ if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+ tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+ if err != nil {
+ return bpDetectCompressionStepData{}, err
+ }
+ if tocDigest != nil {
+ format = compression.ZstdChunked
+ }
+ }
+ res := bpDetectCompressionStepData{
+ isCompressed: decompressor != nil,
+ format: format,
+ decompressor: decompressor,
+ }
+ if res.isCompressed {
+ res.srcCompressorBaseVariantName = format.BaseVariantName()
+ } else {
+ res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
+ }
+
+ if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+ logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
+ }
+ return res, nil
+}
+
+// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
+type bpCompressionStepData struct {
+ operation bpcOperation // What we are actually doing
+ uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
+ uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob.
+ uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
+}
+
+type bpcOperation int
+
+const (
+ bpcOpInvalid bpcOperation = iota
+ bpcOpPreserveOpaque // We are preserving something where compression is not applicable
+ bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer
+ bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer
+ bpcOpCompressUncompressed // We are compressing uncompressed data
+ bpcOpRecompressCompressed // We are recompressing compressed data
+ bpcOpDecompressCompressed // We are decompressing compressed data
+)
+
+// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
+// and must eventually call close.
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
+ detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
+ if !layerCompressionChangeSupported {
+ logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
+ }
+ if canModifyBlob && layerCompressionChangeSupported {
+ for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
+ ic.bpcPreserveEncrypted,
+ ic.bpcCompressUncompressed,
+ ic.bpcRecompressCompressed,
+ ic.bpcDecompressCompressed,
+ } {
+ res, err := fn(stream, detected)
+ if err != nil {
+ return nil, err
+ }
+ if res != nil {
+ return res, nil
+ }
+ }
+ }
+ return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
+}
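
The dispatch in blobPipelineCompressionStep is a simple first-match chain: each bpc* helper returns nil when it does not apply, the first non-nil result wins, and bpcPreserveOriginal is the fallback. A standalone sketch of that pattern (all names hypothetical):

```go
package main

import "fmt"

type stepResult struct{ note string }

func main() {
	steps := []func(layer string) (*stepResult, error){
		func(layer string) (*stepResult, error) { return nil, nil },                       // does not apply
		func(layer string) (*stepResult, error) { return &stepResult{"compress"}, nil },   // first match wins
		func(layer string) (*stepResult, error) { return &stepResult{"decompress"}, nil }, // never reached
	}
	for _, fn := range steps {
		res, err := fn("layer-0")
		if err != nil {
			fmt.Println("step failed:", err)
			return
		}
		if res != nil {
			fmt.Println("selected step:", res.note)
			return
		}
	}
	fmt.Println("fallback: preserve original")
}
```
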
+
+// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) {
+ // We can’t do anything with an encrypted blob unless it is decrypted.
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ return &bpCompressionStepData{
+ operation: bpcOpPreserveOpaque,
+ uploadedOperation: types.PreserveOriginal,
+ uploadedAlgorithm: nil,
+ srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
+ logrus.Debugf("Compressing blob on the fly")
+ var uploadedAlgorithm *compressiontypes.Algorithm
+ if ic.compressionFormat != nil {
+ uploadedAlgorithm = ic.compressionFormat
+ } else {
+ uploadedAlgorithm = defaultCompressionFormat
+ }
+
+ reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
+ // Note: reader must be closed on all return paths.
+ stream.reader = reader
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ specificVariantName := uploadedAlgorithm.Name()
+ if specificVariantName == uploadedAlgorithm.BaseVariantName() {
+ specificVariantName = internalblobinfocache.UnknownCompression
+ }
+ return &bpCompressionStepData{
+ operation: bpcOpCompressUncompressed,
+ uploadedOperation: types.Compress,
+ uploadedAlgorithm: uploadedAlgorithm,
+ uploadedAnnotations: annotations,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(),
+ uploadedCompressorSpecificVariantName: specificVariantName,
+ closers: []io.Closer{reader},
+ }, nil
+ }
+ return nil, nil
+}
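
Resetting Digest to "" and Size to -1 above forces the destination's PutBlob to recompute them over the newly compressed stream. A standalone sketch of that compress-on-the-fly flow, with compress/gzip standing in for the pluggable algorithm and a sha256 consumer standing in for PutBlob:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("uncompressed layer data")

	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if err == nil {
			err = gz.Close() // flush before closing the pipe
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()

	// The consumer re-digests the compressed bytes as it reads them.
	h := sha256.New()
	var compressed bytes.Buffer
	if _, err := io.Copy(io.MultiWriter(h, &compressed), pr); err != nil {
		panic(err)
	}
	fmt.Printf("compressed digest sha256:%x (%d bytes)\n", h.Sum(nil), compressed.Len())
}
```
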
+
+// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
+ ic.compressionFormat != nil &&
+ (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) {
+ // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
+ // re-compressed using the desired format.
+ logrus.Debugf("Blob will be converted")
+
+ decompressed, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ decompressed.Close()
+ }
+ }()
+
+ recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
+ // Note: recompressed must be closed on all return paths.
+ stream.reader = recompressed
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ Digest: "",
+ Size: -1,
+ }
+ specificVariantName := ic.compressionFormat.Name()
+ if specificVariantName == ic.compressionFormat.BaseVariantName() {
+ specificVariantName = internalblobinfocache.UnknownCompression
+ }
+ succeeded = true
+ return &bpCompressionStepData{
+ operation: bpcOpRecompressCompressed,
+ uploadedOperation: types.PreserveOriginal,
+ uploadedAlgorithm: ic.compressionFormat,
+ uploadedAnnotations: annotations,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(),
+ uploadedCompressorSpecificVariantName: specificVariantName,
+ closers: []io.Closer{decompressed, recompressed},
+ }, nil
+ }
+ return nil, nil
+}
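
The succeeded flag above is a common Go idiom for multi-stage resource setup: a deferred closure releases intermediate resources on every failure path, and is disarmed just before ownership transfers to the caller. A standalone sketch (names illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// buildPipeline hands an open reader to the caller; the deferred closure only
// fires on failure paths, because succeeded disarms it before the return.
func buildPipeline() (io.ReadCloser, error) {
	decompressed := io.NopCloser(strings.NewReader("payload"))

	succeeded := false
	defer func() {
		if !succeeded {
			decompressed.Close() // error paths only; otherwise the caller owns it
		}
	}()

	// ... further setup that may fail would go here ...

	succeeded = true
	return decompressed, nil
}

func main() {
	rc, err := buildPipeline()
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(string(data))
}
```
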
+
+// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
+ logrus.Debugf("Blob will be decompressed")
+ s, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ // Note: s must be closed on all return paths.
+ stream.reader = s
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: bpcOpDecompressCompressed,
+ uploadedOperation: types.Decompress,
+ uploadedAlgorithm: nil,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+ closers: []io.Closer{s},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
+// This does not change the sourceStream parameter; we include it for symmetry with other
+// pipeline steps.
+func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
+ layerCompressionChangeSupported bool) *bpCompressionStepData {
+ logrus.Debugf("Using original blob without modification")
+ // Remember if the original blob was compressed, and if so how, so that if
+ // LayerInfosForCopy() returned something that differs from what was in the
+ // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
+ // it will be able to correctly derive the MediaType for the copied blob.
+ //
+ // But don’t touch blobs in objects where we can’t change compression,
+ // so that src.UpdatedImage() doesn’t fail; assume that for such blobs
+ // LayerInfosForCopy() should not be making any changes in the first place.
+ var bpcOp bpcOperation
+ var uploadedOp types.LayerCompression
+ var algorithm *compressiontypes.Algorithm
+ switch {
+ case !layerCompressionChangeSupported:
+ bpcOp = bpcOpPreserveOpaque
+ uploadedOp = types.PreserveOriginal
+ algorithm = nil
+ case detected.isCompressed:
+ bpcOp = bpcOpPreserveCompressed
+ uploadedOp = types.PreserveOriginal
+ algorithm = &detected.format
+ default:
+ bpcOp = bpcOpPreserveUncompressed
+ uploadedOp = types.Decompress
+ algorithm = nil
+ }
+ return &bpCompressionStepData{
+ operation: bpcOp,
+ uploadedOperation: uploadedOp,
+ uploadedAlgorithm: algorithm,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ // We only record the base variant of the format on upload; we didn’t do anything with
+ // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
+ // reuse of any kind between the blob digest and the TOC digest.
+ uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+ }
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+ *operation = d.uploadedOperation
+ // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+ *algorithm = d.uploadedAlgorithm
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, d.uploadedAnnotations)
+}
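
updateCompressionEdits merges the upload annotations into a caller-owned map, allocating it only when the caller passed nil. A standalone sketch of that lazy-merge pattern using the standard maps package (Go 1.21+; the annotation key is made up):

```go
package main

import (
	"fmt"
	"maps"
)

func mergeAnnotations(annotations *map[string]string, uploaded map[string]string) {
	if *annotations == nil {
		*annotations = map[string]string{} // allocate only on first use
	}
	maps.Copy(*annotations, uploaded)
}

func main() {
	var ann map[string]string // nil until the first merge
	mergeAnnotations(&ann, map[string]string{"io.example.toc-digest": "sha256:abc"}) // hypothetical key
	fmt.Println(ann)
}
```
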
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
+// and the original srcInfo (which the caller guarantees has been validated).
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+ encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptionStep.encrypting && !decryptionStep.decrypting {
+ // If d.operation != bpcOpPreserve*, we now have two reliable digest values:
+ // srcInfo.Digest describes the pre-d.operation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+ // (because we set stream.info.Digest == "", this must have been computed afresh).
+ switch d.operation {
+ case bpcOpPreserveOpaque:
+ // No useful information
+ case bpcOpCompressUncompressed:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ if d.uploadedAnnotations != nil {
+ tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations)
+ if err != nil {
+ return fmt.Errorf("parsing just-created compression annotations: %w", err)
+ }
+ if tocDigest != nil {
+ c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest)
+ }
+ }
+ case bpcOpDecompressCompressed:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
+ // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest,
+ // and we don’t know that one.
+ // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless
+ // RecordDigestUncompressedPair was called for both compressed variants for some other reason).
+ case bpcOpPreserveUncompressed:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest)
+ case bpcOpInvalid:
+ fallthrough
+ default:
+ return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+ }
+ }
+ if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
+ return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
+ d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
+ }
+ if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
+ BaseVariantCompressor: d.uploadedCompressorBaseVariantName,
+ SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName,
+ SpecificVariantAnnotations: d.uploadedAnnotations,
+ })
+ }
+ if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
+ d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+ // If the source is already using some TOC-dependent variant, we either copied the
+ // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
+ // so record neither the variant name, nor the TOC digest.
+ c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
+ BaseVariantCompressor: d.srcCompressorBaseVariantName,
+ SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ })
+ }
+ return nil
+}
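
As the comments in recordValidatedDigestData note, the blob info cache links compression variants through the digest of the uncompressed payload; without that link (the bpcOpRecompressCompressed/bpcOpPreserveCompressed cases) no substitution is possible. A toy sketch of that association, not the real BlobInfoCache2 API:

```go
package main

import "fmt"

// toyCache is NOT the real BlobInfoCache2 API; it only illustrates the linkage.
type toyCache struct {
	uncompressedOf map[string]string   // compressed digest -> uncompressed digest
	variantsOf     map[string][]string // uncompressed digest -> known compressed variants
}

func (c *toyCache) recordPair(compressed, uncompressed string) {
	c.uncompressedOf[compressed] = uncompressed
	c.variantsOf[uncompressed] = append(c.variantsOf[uncompressed], compressed)
}

func (c *toyCache) substitutes(compressed string) []string {
	if u, ok := c.uncompressedOf[compressed]; ok {
		return c.variantsOf[u] // all variants sharing the same uncompressed payload
	}
	return nil // no uncompressed digest on file: no substitution possible
}

func main() {
	c := &toyCache{map[string]string{}, map[string][]string{}}
	c.recordPair("sha256:gzip-variant", "sha256:uncompressed")
	c.recordPair("sha256:zstd-variant", "sha256:uncompressed")
	fmt.Println(c.substitutes("sha256:gzip-variant"))
	fmt.Println(c.substitutes("sha256:orphan")) // nil: never linked
}
```
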
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf)
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel) // On success this sets err to nil, so the deferred CloseWithError(nil) acts as a clean Close()
+}
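
The comment on the defer in compressGoroutine is worth unpacking: because the deferred function is a closure over err, the value passed to CloseWithError is whatever err holds when the goroutine exits, including the pre-seeded sentinel error if the work never ran to completion. A standalone demonstration of the difference:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func produce(dest *io.PipeWriter, fail bool) {
	err := errors.New("internal error: unexpected early exit")
	defer func() { // a plain `defer dest.CloseWithError(err)` would freeze err here
		dest.CloseWithError(err)
	}()
	if fail {
		return // err keeps its sentinel value, which the reader will observe
	}
	_, err = io.WriteString(dest, "payload")
}

func main() {
	for _, fail := range []bool{false, true} {
		pr, pw := io.Pipe()
		go produce(pw, fail)
		data, err := io.ReadAll(pr)
		fmt.Printf("data=%q err=%v\n", data, err)
	}
}
```
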
+
+// compressedStream returns a stream with the contents of the input reader compressed using algorithm, and a metadata map.
+// The caller must close the returned reader.
+// Only AFTER the stream is fully consumed is the metadata map updated with annotations to use on the data.
+func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+ // If compressGoroutine fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because the caller has exited and closed pipeReader, so that further writes to the pipe fail,
+ // we don’t care.
+ go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
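
The contract that the annotations map is only valid after the stream is fully consumed deserves emphasis: the producer goroutine fills the map right before closing the pipe, so EOF on the reader is what publishes the map safely. A standalone sketch (the key io.example.size is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// compressedStreamSketch mimics the contract: the map is populated by the
// producer goroutine just before it closes the pipe, so it must not be read
// until the returned reader has hit EOF.
func compressedStreamSketch(src io.Reader) (io.ReadCloser, map[string]string) {
	annotations := map[string]string{}
	pr, pw := io.Pipe()
	go func() {
		n, err := io.Copy(pw, src)                     // stand-in for real compression
		annotations["io.example.size"] = fmt.Sprint(n) // hypothetical annotation key
		pw.CloseWithError(err)                         // publishes the map via EOF
	}()
	return pr, annotations
}

func main() {
	r, ann := compressedStreamSketch(strings.NewReader("some data"))
	// Reading ann here would race with the producer; drain the stream first.
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	r.Close()
	fmt.Println(ann) // safe: EOF guarantees the producer finished
}
```
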
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index ed76283f9..5b26abb09 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -1,52 +1,35 @@
package copy
import (
- "bytes"
"context"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
- "reflect"
- "strings"
- "sync"
+ "slices"
"time"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
- "github.com/containers/image/v5/internal/pkg/platform"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagesource"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
- "github.com/containers/image/v5/pkg/compression"
+ compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/signer"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vbauerster/mpb/v6"
- "github.com/vbauerster/mpb/v6/decor"
- "golang.org/x/crypto/ssh/terminal"
"golang.org/x/sync/semaphore"
+ "golang.org/x/term"
)
-type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
- validationSucceeded bool
-}
-
-// FIXME: disable early layer commits temporarily until a solid solution to
-// address #1205 has been found.
-const enableEarlyCommit = false
-
var (
 // ErrDecryptParamsMissing is returned if the necessary decryption parameters are not present
ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
@@ -56,90 +39,6 @@ var (
maxParallelDownloads = uint(6)
)
-// compressionBufferSize is the buffer size used to compress a blob
-var compressionBufferSize = 1048576
-
-// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
-// using the algorithm that the media type says it should be compressed with
-var expectedCompressionFormats = map[string]*compression.Algorithm{
- imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
- imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
- manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
-}
-
-// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
-// (neither is set if EOF is never reached).
-func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
- var digester digest.Digester
- if err := expectedDigest.Validate(); err != nil {
- return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
- }
- digestAlgorithm := expectedDigest.Algorithm()
- if !digestAlgorithm.Available() {
- return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
- }
- digester = digestAlgorithm.Digester()
-
- return &digestingReader{
- source: source,
- digester: digester,
- expectedDigest: expectedDigest,
- validationFailed: false,
- }, nil
-}
-
-func (d *digestingReader) Read(p []byte) (int, error) {
- n, err := d.source.Read(p)
- if n > 0 {
- if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
- // Coverage: This should not happen, the hash.Hash interface requires
- // d.digest.Write to never return an error, and the io.Writer interface
- // requires n2 == len(input) if no error is returned.
- return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
- }
- }
- if err == io.EOF {
- actualDigest := d.digester.Digest()
- if actualDigest != d.expectedDigest {
- d.validationFailed = true
- return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
- }
- d.validationSucceeded = true
- }
- return n, err
-}
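
The digestingReader removed here is not gone from the package: the compression step's comments still rely on it ("verified by digestingReader"); this refactor moves it out of copy.go. A standalone sketch of the technique, streaming verification at EOF, with crypto/sha256 standing in for opencontainers/go-digest:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

type digestingReader struct {
	source   io.Reader
	hash     hash.Hash
	expected string
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		d.hash.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF { // only at EOF do we know the full digest
		if actual := hex.EncodeToString(d.hash.Sum(nil)); actual != d.expected {
			return n, fmt.Errorf("digest mismatch: expected %s, got %s", d.expected, actual)
		}
	}
	return n, err
}

func main() {
	payload := "layer contents"
	sum := sha256.Sum256([]byte(payload))
	r := &digestingReader{strings.NewReader(payload), sha256.New(), hex.EncodeToString(sum[:])}
	_, err := io.Copy(io.Discard, r)
	fmt.Println("verification error:", err) // <nil> when the stream matches
}
```
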
-
-// copier allows us to keep track of diffID values for blobs, and other
-// data shared across one or more images in a possible manifest list.
-type copier struct {
- dest types.ImageDestination
- rawSource types.ImageSource
- reportWriter io.Writer
- progressOutput io.Writer
- progressInterval time.Duration
- progress chan types.ProgressProperties
- blobInfoCache internalblobinfocache.BlobInfoCache2
- copyInParallel bool
- compressionFormat compression.Algorithm
- compressionLevel *int
- ociDecryptConfig *encconfig.DecryptConfig
- ociEncryptConfig *encconfig.EncryptConfig
- maxParallelDownloads uint
- downloadForeignLayers bool
-}
-
-// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
-type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
- canSubstituteBlobs bool
- ociEncryptLayers *[]int
-}
-
const (
// CopySystemImage is the default value which, when set in
// Options.ImageListSelection, indicates that the caller expects only one
@@ -173,21 +72,36 @@ type ImageListSelection int
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
- RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
- SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature.
+ // Signers to use to add signatures during the copy.
+ // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row.
+ Signers []*signer.Signer
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+ SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination
+
ReportWriter io.Writer
SourceCtx *types.SystemContext
DestinationCtx *types.SystemContext
ProgressInterval time.Duration // time to wait between reports to signal the progress channel
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
- // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type
+
+ // Preserve digests, and fail if we cannot.
+ PreserveDigests bool
+ // manifest MIME type of image set by user. "" is default and means use the autodetection to the manifest MIME type
ForceManifestMIMEType string
ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+ // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue;
+ // prefer the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time)
+ // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers).
+ // This only affects CopySystemImage.
+ PreferGzipInstances types.OptionalBool
+
// If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
// The encryption options is derived from the construction of EncryptConfig object.
- // Note: During initial encryption process of a layer, the resultant digest is not known
- // during creation, so newDigestingReader has to be set with validateDigest = false
OciEncryptConfig *encconfig.EncryptConfig
// OciEncryptLayers represents the list of layers to encrypt.
// If nil, don't encrypt any layers.
@@ -198,36 +112,84 @@ type Options struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
- // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
+
+ // A weighted semaphore to limit the number of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+ ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+ // MaxParallelDownloads indicates the maximum number of layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set.
MaxParallelDownloads uint
+
// When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
// exists (and is equivalent). Making the eventual (no-op) copy more performant for this case. Enabling the option
// is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
OptimizeDestinationImageAlreadyExists bool
+
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
// to not indicate "nondistributable".
DownloadForeignLayers bool
+
+ // Contains slice of OptionCompressionVariant, where copy will ensure that for each platform
+ // in the manifest list, a variant with the requested compression will exist.
+ // Invalid when copying a non-multi-architecture image. That will probably
+ // change in the future.
+ EnsureCompressionVariantsExist []OptionCompressionVariant
+ // ForceCompressionFormat ensures that the compression algorithm set in
+ // DestinationCtx.CompressionFormat is used exclusively, and blobs of other
+ // compression algorithms are not reused.
+ ForceCompressionFormat bool
+
+ // ReportResolvedReference, if set, asks the destination transport to store
+ // a “resolved†(more detailed) reference to the created image
+ // into the value this option points to.
+ // What “resolved†means is transport-specific.
+ // Most transports don’t support this, and cause the value to be set to nil.
+ //
+ // For the containers-storage: transport, the reference contains an image ID,
+ // so that storage.ResolveReference returns exactly the created image.
+ // WARNING: It is unspecified whether the reference also contains a reference.Named element.
+ ReportResolvedReference *types.ImageReference
}
-// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
-func validateImageListSelection(selection ImageListSelection) error {
- switch selection {
- case CopySystemImage, CopyAllImages, CopySpecificImages:
- return nil
- default:
- return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+// OptionCompressionVariant allows the end user to supply information about
+// the selected compression algorithm and compression level.
+// See EnsureCompressionVariantsExist for more about its usage.
+type OptionCompressionVariant struct {
+ Algorithm compression.Algorithm
+ Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+// The owner must call close() when done.
+type copier struct {
+ policyContext *signature.PolicyContext
+ dest private.ImageDestination
+ rawSource private.ImageSource
+ options *Options // never nil
+
+ reportWriter io.Writer
+ progressOutput io.Writer
+
+ unparsedToplevel *image.UnparsedImage // for rawSource
+ blobInfoCache internalblobinfocache.BlobInfoCache2
+ concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
+ signers []*signer.Signer // Signers to use to create new signatures for the image
+ signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
+}
+
+// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
+func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
+ if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
+ return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
}
+ return options.ForceCompressionFormat, nil
}
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
- // NOTE this function uses an output parameter for the error return value.
- // Setting this and returning is the ideal way to return an error.
- //
- // the defers in this routine will wrap the error return with its own errors
- // which can be valuable context in the middle of a multi-streamed copy.
if options == nil {
options = &Options{}
}
@@ -236,563 +198,167 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
return nil, err
}
- reportWriter := ioutil.Discard
+ reportWriter := io.Discard
if options.ReportWriter != nil {
reportWriter = options.ReportWriter
}
- dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
+ // safeClose amends retErr with an error from c.Close(), if any.
+ safeClose := func(name string, c io.Closer) {
+ err := c.Close()
+ if err == nil {
+ return
+ }
+ // Do not use %w for err as we don't want it to be unwrapped by callers.
+ if retErr != nil {
+ retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
+ } else {
+ retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
+ }
+ }
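
safeClose folds Close() failures into the function's named return value instead of dropping them, while deliberately not wrapping the close error with %w so callers can only unwrap the primary failure. A standalone sketch of the same idiom (the closer type is made up):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

type noisyCloser struct{ name string }

func (n noisyCloser) Close() error { return fmt.Errorf("%s: close failed", n.name) }

func run() (retErr error) {
	safeClose := func(name string, c io.Closer) {
		err := c.Close()
		if err == nil {
			return
		}
		// Do not use %w for err so callers can only unwrap the primary failure.
		if retErr != nil {
			retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
		} else {
			retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
		}
	}
	defer safeClose("dest", noisyCloser{"dest"})
	defer safeClose("src", noisyCloser{"src"})
	return errors.New("primary failure")
}

func main() {
	fmt.Println(run()) // both close errors are prepended to the primary failure
}
```
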
+
+ publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
if err != nil {
- return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+ return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
}
- defer func() {
- if err := dest.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (dest: %v)", err)
- }
- }()
+ dest := imagedestination.FromPublic(publicDest)
+ defer safeClose("dest", dest)
- rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
+ publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
if err != nil {
- return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
}
- defer func() {
- if err := rawSource.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (src: %v)", err)
- }
- }()
+ rawSource := imagesource.FromPublic(publicRawSource)
+ defer safeClose("src", rawSource)
// If reportWriter is not a TTY (e.g., when piping to a file), do not
// print the progress bars to avoid long and hard to parse output.
- // createProgressBar() will print a single line instead.
+ // Instead use printCopyInfo() to print single line "Copying ..." messages.
progressOutput := reportWriter
if !isTTY(reportWriter) {
- progressOutput = ioutil.Discard
+ progressOutput = io.Discard
}
- copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{
- dest: dest,
- rawSource: rawSource,
- reportWriter: reportWriter,
- progressOutput: progressOutput,
- progressInterval: options.ProgressInterval,
- progress: options.Progress,
- copyInParallel: copyInParallel,
+ policyContext: policyContext,
+ dest: dest,
+ rawSource: rawSource,
+ options: options,
+
+ reportWriter: reportWriter,
+ progressOutput: progressOutput,
+
+ unparsedToplevel: image.UnparsedInstance(rawSource, nil),
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
- // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
- // we might want to add a separate CommonCtx — or would that be too confusing?
- blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
- ociDecryptConfig: options.OciDecryptConfig,
- ociEncryptConfig: options.OciEncryptConfig,
- maxParallelDownloads: options.MaxParallelDownloads,
- downloadForeignLayers: options.DownloadForeignLayers,
- }
- // Default to using gzip compression unless specified otherwise.
- if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
- algo, err := compression.AlgorithmByName("gzip")
- if err != nil {
- return nil, err
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more).
+ // Conceptually the cache settings should be in copy.Options instead.
+ blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
+ }
+ defer c.close()
+ c.blobInfoCache.Open()
+ defer c.blobInfoCache.Close()
+
+ // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
+ c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore
+ if c.concurrentBlobCopiesSemaphore == nil {
+ max := c.options.MaxParallelDownloads
+ if max == 0 {
+ max = maxParallelDownloads
+ }
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max))
}
- c.compressionFormat = algo
} else {
- c.compressionFormat = *options.DestinationCtx.CompressionFormat
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
+ if c.options.ConcurrentBlobCopiesSemaphore != nil {
+ if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
+ }
+ defer c.options.ConcurrentBlobCopiesSemaphore.Release(1)
+ }
}
- if options.DestinationCtx != nil {
- // Note that the compressionLevel can be nil.
- c.compressionLevel = options.DestinationCtx.CompressionLevel
+
+ if err := c.setupSigners(); err != nil {
+ return nil, err
}
- unparsedToplevel := image.UnparsedInstance(rawSource, nil)
- multiImage, err := isMultiImage(ctx, unparsedToplevel)
+ multiImage, err := isMultiImage(ctx, c.unparsedToplevel)
if err != nil {
- return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
}
if !multiImage {
+ if len(options.EnsureCompressionVariantsExist) > 0 {
+ return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+ }
+ requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
// The simple case: just copy a single image.
- if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
+ single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
+ if err != nil {
+ return nil, err
+ }
+ copiedManifest = single.manifest
+ } else if c.options.ImageListSelection == CopySystemImage {
+ if len(options.EnsureCompressionVariantsExist) > 0 {
+ return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+ }
+ requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
return nil, err
}
- } else if options.ImageListSelection == CopySystemImage {
// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
// matches the current system to copy, and copy it.
- mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
+ mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
- manifestList, err := manifest.ListFromBlob(mfest, manifestType)
+ manifestList, err := internalManifest.ListFromBlob(mfest, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
- instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
+ instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx
if err != nil {
- return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
-
- if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
- return nil, err
+ single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
+ if err != nil {
+ return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}
- } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
+ copiedManifest = single.manifest
+ } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */
// If we were asked to copy multiple images and can't, that's an error.
if !supportsMultipleImages(c.dest) {
- return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+ return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
}
// Copy some or all of the images.
- switch options.ImageListSelection {
+ switch c.options.ImageListSelection {
case CopyAllImages:
logrus.Debugf("Source is a manifest list; copying all instances")
case CopySpecificImages:
logrus.Debugf("Source is a manifest list; copying some instances")
}
- if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
+ if copiedManifest, err = c.copyMultipleImages(ctx); err != nil {
return nil, err
}
}
- if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
- return nil, errors.Wrap(err, "Error committing the finished image")
- }
-
- return copiedManifest, nil
-}
-
-// Checks if the destination supports accepting multiple images by checking if it can support
-// manifest types that are lists of other manifests.
-func supportsMultipleImages(dest types.ImageDestination) bool {
- mtypes := dest.SupportedManifestMIMETypes()
- if len(mtypes) == 0 {
- // Anything goes!
- return true
- }
- for _, mtype := range mtypes {
- if manifest.MIMETypeIsMultiImage(mtype) {
- return true
- }
- }
- return false
-}
-
-// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
-// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
-func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
- srcManifest, _, err := src.Manifest(ctx)
- if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "Error reading manifest from image")
- }
-
- srcManifestDigest, err := manifest.Digest(srcManifest)
- if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
- }
-
- destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
- if err != nil {
- logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
- return false, nil, "", "", nil
- }
-
- destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
- if err != nil {
- logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
- return false, nil, "", "", nil
- }
-
- destManifestDigest, err := manifest.Digest(destManifest)
- if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
- }
-
- logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
- if srcManifestDigest != destManifestDigest {
- return false, nil, "", "", nil
- }
-
- // Destination and source manifests, types and digests should all be equivalent
- return true, destManifest, destManifestType, destManifestDigest, nil
-}
-
-// copyMultipleImages copies some or all of an image list's instances, using
-// policyContext to validate source image admissibility.
-func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) {
- // Parse the list and get a copy of the original value after it's re-encoded.
- manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error reading manifest list")
- }
- originalList, err := manifest.ListFromBlob(manifestList, manifestType)
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList))
- }
- updatedList := originalList.Clone()
-
- // Read and/or clear the set of signatures for this list.
- var sigs [][]byte
- if options.RemoveSignatures {
- sigs = [][]byte{}
- } else {
- c.Printf("Getting image list signatures\n")
- s, err := c.rawSource.GetSignatures(ctx, nil)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error reading signatures")
- }
- sigs = s
- }
- if len(sigs) != 0 {
- c.Printf("Checking if image list destination supports signatures\n")
- if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
- }
- }
- canModifyManifestList := (len(sigs) == 0)
-
- // Determine if we'll need to convert the manifest list to a different format.
- forceListMIMEType := options.ForceManifestMIMEType
- switch forceListMIMEType {
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
- forceListMIMEType = manifest.DockerV2ListMediaType
- case imgspecv1.MediaTypeImageManifest:
- forceListMIMEType = imgspecv1.MediaTypeImageIndex
- }
- selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination")
- }
- if selectedListType != originalList.MIMEType() {
- if !canModifyManifestList {
- return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
- }
- }
-
- // Copy each image, or just the ones we want to copy, in turn.
- instanceDigests := updatedList.Instances()
- imagesToCopy := len(instanceDigests)
- if options.ImageListSelection == CopySpecificImages {
- imagesToCopy = len(options.Instances)
- }
- c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
- updates := make([]manifest.ListUpdate, len(instanceDigests))
- instancesCopied := 0
- for i, instanceDigest := range instanceDigests {
- if options.ImageListSelection == CopySpecificImages {
- skip := true
- for _, instance := range options.Instances {
- if instance == instanceDigest {
- skip = false
- break
- }
- }
- if skip {
- update, err := updatedList.Instance(instanceDigest)
- if err != nil {
- return nil, "", err
- }
- logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
- // Record the digest/size/type of the manifest that we didn't copy.
- updates[i] = update
- continue
- }
- }
- logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
- c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
- unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
- updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
- if err != nil {
- return nil, "", err
- }
- instancesCopied++
- // Record the result of a possible conversion here.
- update := manifest.ListUpdate{
- Digest: updatedManifestDigest,
- Size: int64(len(updatedManifest)),
- MediaType: updatedManifestType,
- }
- updates[i] = update
- }
-
- // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
- if err = updatedList.UpdateInstances(updates); err != nil {
- return nil, "", errors.Wrapf(err, "Error updating manifest list")
- }
-
- // Iterate through supported list types, preferred format first.
- c.Printf("Writing manifest list to image destination\n")
- var errs []string
- for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
- attemptedList := updatedList
-
- logrus.Debugf("Trying to use manifest list type %s…", thisListType)
-
- // Perform the list conversion, if we need one.
- if thisListType != updatedList.MIMEType() {
- attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", thisListType)
- }
- }
-
- // Check if the updates or a type conversion meaningfully changed the list of images
- // by serializing them both so that we can compare them.
- attemptedManifestList, err := attemptedList.Serialize()
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
- }
- originalManifestList, err := originalList.Serialize()
- if err != nil {
- return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
- }
-
- // If we can't just use the original value, but we have to change it, flag an error.
- if !bytes.Equal(attemptedManifestList, originalManifestList) {
- if !canModifyManifestList {
- return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
- }
- logrus.Debugf("Manifest list has been updated")
- } else {
- // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
- attemptedManifestList = manifestList
- }
-
- // Save the manifest list.
- err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
- if err != nil {
- logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
- errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
- continue
- }
- errs = nil
- manifestList = attemptedManifestList
- break
- }
- if errs != nil {
- return nil, "", fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
- }
-
- // Sign the manifest list.
- if options.SignBy != "" {
- newSig, err := c.createSignature(manifestList, options.SignBy)
- if err != nil {
- return nil, "", err
- }
- sigs = append(sigs, newSig)
- }
-
- c.Printf("Storing list signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
- return nil, "", errors.Wrap(err, "Error writing signatures")
- }
-
- return manifestList, selectedListType, nil
-}
-
-// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
-// source image admissibility.
-func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
- // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
- // Make sure we fail cleanly in such cases.
- multiImage, err := isMultiImage(ctx, unparsedImage)
- if err != nil {
- // FIXME FIXME: How to name a reference for the sub-image?
- return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
- }
- if multiImage {
- return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
- }
-
- // Please keep this policy check BEFORE reading any other information about the image.
- // (The multiImage check above only matches the MIME type, which we have received anyway.
- // Actual parsing of anything should be deferred.)
- if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
- return nil, "", "", errors.Wrap(err, "Source image rejected")
- }
- src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
- }
-
- // If the destination is a digested reference, make a note of that, determine what digest value we're
- // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
- // one item from a manifest list that matches it, accept that as a match.
- destIsDigestedReference := false
- if named := c.dest.Reference().DockerReference(); named != nil {
- if digested, ok := named.(reference.Digested); ok {
- destIsDigestedReference = true
- sourceManifest, _, err := src.Manifest(ctx)
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
- }
- matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
- }
- if !matches {
- manifestList, _, err := unparsedToplevel.Manifest(ctx)
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
- }
- matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
- }
- if !matches {
- return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
- }
- }
- }
- }
-
- if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
- return nil, "", "", err
- }
-
- var sigs [][]byte
- if options.RemoveSignatures {
- sigs = [][]byte{}
- } else {
- c.Printf("Getting image source signatures\n")
- s, err := src.Signatures(ctx)
- if err != nil {
- return nil, "", "", errors.Wrap(err, "Error reading signatures")
- }
- sigs = s
- }
- if len(sigs) != 0 {
- c.Printf("Checking if image destination supports signatures\n")
- if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
- }
- }
-
- ic := imageCopier{
- c: c,
- manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
- src: src,
- // diffIDsAreNeeded is computed later
- canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
- ociEncryptLayers: options.OciEncryptLayers,
- }
- // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
- // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
- // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
- // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
- // and we would reuse and sign it.
- ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
-
- if err := ic.updateEmbeddedDockerReference(); err != nil {
- return nil, "", "", err
- }
-
- destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
-
- // We compute preferredManifestMIMEType only to show it in error messages.
- // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
- if err != nil {
- return nil, "", "", err
+ if options.ReportResolvedReference != nil {
+ *options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport.
}
-
- // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
- ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
- // If encrypted and decryption keys provided, we should try to decrypt
- ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
-
- // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
- if options.OptimizeDestinationImageAlreadyExists {
- shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
- noPendingManifestUpdates := ic.noPendingManifestUpdates()
-
- logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
- if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
- isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
- if err != nil {
- logrus.Warnf("Failed to compare destination image manifest: %v", err)
- return nil, "", "", err
- }
-
- if isSrcDestManifestEqual {
- c.Printf("Skipping: image already present at destination\n")
- return retManifest, retManifestType, retManifestDigest, nil
- }
- }
+ if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{
+ UnparsedToplevel: c.unparsedToplevel,
+ ReportResolvedReference: options.ReportResolvedReference,
+ }); err != nil {
+ return nil, fmt.Errorf("committing the finished image: %w", err)
}
- if err := ic.copyLayers(ctx); err != nil {
- return nil, "", "", err
- }
-
- // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
- // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
- // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
- // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
- // we're altering how they're compressed. If the process succeeds, fine…
- manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
- retManifestType = preferredManifestMIMEType
- if err != nil {
- logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
- // … if it fails, and the failure is either because the manifest is rejected by the registry, or
- // because we failed to create a manifest of the specified type because the specific manifest type
- // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
- // have other options available that could still succeed.
- _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
- _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
- if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
- // We don’t have other options.
- // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
- // Don’t bother the user with MIME types if we have no choice.
- return nil, "", "", err
- }
- // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
- // So if we are here, we will definitely be trying to convert the manifest.
- // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
- // so let’s bail out early and with a better error message.
- if !ic.canModifyManifest {
- return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)")
- }
-
- // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
- errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
- for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
- logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
- ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
- attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
- if err != nil {
- logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
- errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
- continue
- }
-
- // We have successfully uploaded a manifest.
- manifestBytes = attemptedManifest
- retManifestDigest = attemptedManifestDigest
- retManifestType = manifestMIMEType
- errs = nil // Mark this as a success so that we don't abort below.
- break
- }
- if errs != nil {
- return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
- }
- }
- if targetInstance != nil {
- targetInstance = &retManifestDigest
- }
-
- if options.SignBy != "" {
- newSig, err := c.createSignature(manifestBytes, options.SignBy)
- if err != nil {
- return nil, "", "", err
- }
- sigs = append(sigs, newSig)
- }
-
- c.Printf("Storing signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
- return nil, "", "", errors.Wrap(err, "Error writing signatures")
- }
-
- return manifestBytes, retManifestType, retManifestDigest, nil
+ return copiedManifest, nil
}
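The rewritten tail above delegates finalization to CommitWithOptions, and callers can opt into learning the transport-resolved destination reference through options.ReportResolvedReference, which is reset to nil up front so that it stays nil whenever the transport has nothing to report. A minimal, hedged sketch of how a consumer might drive this through the public API of the vendored v5.33 copy package (the helper name copyReportingResolved and the concrete references are ours; dir: destinations simply leave the pointer nil):

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

// copyReportingResolved copies srcName to destName and prints the resolved
// destination reference, when the transport is able to report one.
func copyReportingResolved(ctx context.Context, policyContext *signature.PolicyContext, destName, srcName string) error {
	srcRef, err := alltransports.ParseImageName(srcName)
	if err != nil {
		return err
	}
	destRef, err := alltransports.ParseImageName(destName)
	if err != nil {
		return err
	}
	var resolved types.ImageReference // remains nil if the transport does not support resolution
	if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		ReportResolvedReference: &resolved,
	}); err != nil {
		return err
	}
	if resolved != nil {
		fmt.Println("destination resolved to:", transports.ImageName(resolved))
	}
	return nil
}

func main() {
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()
	if err := copyReportingResolved(context.Background(), policyContext,
		"dir:/tmp/alpine", "docker://docker.io/library/alpine:latest"); err != nil {
		panic(err)
	}
}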
// Printf writes a formatted string to c.reportWriter.
@@ -800,854 +366,44 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// has a built-in list of functions/methods (whatever object they are for)
// which have their format strings checked; for other names we would have
// to pass a parameter to every (go tool vet) invocation.
-func (c *copier) Printf(format string, a ...interface{}) {
+func (c *copier) Printf(format string, a ...any) {
fmt.Fprintf(c.reportWriter, format, a...)
}
-// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
-func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
- if dest.MustMatchRuntimeOS() {
- c, err := src.OCIConfig(ctx)
- if err != nil {
- return errors.Wrapf(err, "Error parsing image configuration")
- }
- wantedPlatforms, err := platform.WantedPlatforms(sys)
- if err != nil {
- return errors.Wrapf(err, "error getting current platform information %#v", sys)
- }
-
- options := newOrderedSet()
- match := false
- for _, wantedPlatform := range wantedPlatforms {
- // Waiting for https://github.com/opencontainers/image-spec/pull/777 :
- // This currently can’t use image.MatchesPlatform because we don’t know what to use
- // for image.Variant.
- if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
- match = true
- break
- }
- options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
- }
- if !match {
- logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
- c.OS, c.Architecture, strings.Join(options.list, ", "))
+// close tears down state owned by copier.
+func (c *copier) close() {
+ for i, s := range c.signersToClose {
+ if err := s.Close(); err != nil {
+ logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err)
}
}
- return nil
}
-// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
-func (ic *imageCopier) updateEmbeddedDockerReference() error {
- if ic.c.dest.IgnoresEmbeddedDockerReference() {
- return nil // Destination would prefer us not to update the embedded reference.
- }
- destRef := ic.c.dest.Reference().DockerReference()
- if destRef == nil {
- return nil // Destination does not care about Docker references
- }
- if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
- return nil // No reference embedded in the manifest, or it matches destRef already.
- }
-
- if !ic.canModifyManifest {
- return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)",
- transports.ImageName(ic.c.dest.Reference()), destRef.String())
+// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
+func validateImageListSelection(selection ImageListSelection) error {
+ switch selection {
+ case CopySystemImage, CopyAllImages, CopySpecificImages:
+ return nil
+ default:
+ return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection)
}
- ic.manifestUpdates.EmbeddedDockerReference = destRef
- return nil
}
-func (ic *imageCopier) noPendingManifestUpdates() bool {
- return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+// supportsMultipleImages checks whether the destination can accept multiple images, i.e. whether
+// it supports at least one manifest MIME type that is a list of other manifests.
+func supportsMultipleImages(dest types.ImageDestination) bool {
+ mtypes := dest.SupportedManifestMIMETypes()
+ if len(mtypes) == 0 {
+ // Anything goes!
+ return true
+ }
+ return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage)
}
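For context, manifest.MIMETypeIsMultiImage reports whether a MIME type denotes a manifest list or image index, so the check above succeeds as soon as one list-capable type is accepted. A standalone illustration with hard-coded type lists (the values are only examples):

package main

import (
	"fmt"
	"slices"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A destination accepting a single-image schema2 manifest and an OCI image index.
	mtypes := []string{manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageIndex}
	fmt.Println(slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage)) // true, thanks to the index

	// A destination accepting only single-image manifests.
	fmt.Println(slices.ContainsFunc([]string{manifest.DockerV2Schema2MediaType}, manifest.MIMETypeIsMultiImage)) // false
}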
// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
if f, ok := w.(*os.File); ok {
- return terminal.IsTerminal(int(f.Fd()))
- }
- return false
-}
-
-// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
-func (ic *imageCopier) copyLayers(ctx context.Context) error {
- srcInfos := ic.src.LayerInfos()
- numLayers := len(srcInfos)
- updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
- if err != nil {
- return err
- }
- srcInfosUpdated := false
- // If we only need to check authorization, no updates required.
- if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
- if !ic.canModifyManifest {
- return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)")
- }
- srcInfos = updatedSrcInfos
- srcInfosUpdated = true
- }
-
- type copyLayerData struct {
- destInfo types.BlobInfo
- diffID digest.Digest
- err error
- }
-
- // copyGroup is used to determine if all layers are copied
- copyGroup := sync.WaitGroup{}
-
- // copySemaphore is used to limit the number of parallel downloads to
- // avoid malicious images causing troubles and to be nice to servers.
- var copySemaphore *semaphore.Weighted
- if ic.c.copyInParallel {
- max := ic.c.maxParallelDownloads
- if max == 0 {
- max = maxParallelDownloads
- }
- copySemaphore = semaphore.NewWeighted(int64(max))
- } else {
- copySemaphore = semaphore.NewWeighted(int64(1))
- }
-
- data := make([]copyLayerData, numLayers)
- copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
- defer copySemaphore.Release(1)
- defer copyGroup.Done()
- cld := copyLayerData{}
- if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
- // DiffIDs are, currently, needed only when converting from schema1.
- // In which case src.LayerInfos will not have URLs because schema1
- // does not support them.
- if ic.diffIDsAreNeeded {
- cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
- } else {
- cld.destInfo = srcLayer
- logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
- }
- } else {
- cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef)
- }
- data[index] = cld
- }
-
- // Create layer Encryption map
- encLayerBitmap := map[int]bool{}
- var encryptAll bool
- if ic.ociEncryptLayers != nil {
- encryptAll = len(*ic.ociEncryptLayers) == 0
- totalLayers := len(srcInfos)
- for _, l := range *ic.ociEncryptLayers {
- // if layer is negative, it is reverse indexed.
- encLayerBitmap[(totalLayers+l)%totalLayers] = true
- }
-
- if encryptAll {
- for i := 0; i < len(srcInfos); i++ {
- encLayerBitmap[i] = true
- }
- }
- }
-
- if err := func() error { // A scope for defer
- progressPool, progressCleanup := ic.c.newProgressPool(ctx)
- defer func() {
- // Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool.
- copyGroup.Wait()
- progressCleanup()
- }()
-
- for i, srcLayer := range srcInfos {
- err = copySemaphore.Acquire(ctx, 1)
- if err != nil {
- return errors.Wrapf(err, "Can't acquire semaphore")
- }
- copyGroup.Add(1)
- go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference())
- }
-
- // A call to copyGroup.Wait() is done at this point by the defer above.
- return nil
- }(); err != nil {
- return err
- }
-
- destInfos := make([]types.BlobInfo, numLayers)
- diffIDs := make([]digest.Digest, numLayers)
- for i, cld := range data {
- if cld.err != nil {
- return cld.err
- }
- destInfos[i] = cld.destInfo
- diffIDs[i] = cld.diffID
- }
-
- // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
- // OptimizeDestinationImageAlreadyExists short-circuit conditions
- ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
- if ic.diffIDsAreNeeded {
- ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
- }
- if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
- ic.manifestUpdates.LayerInfos = destInfos
- }
- return nil
-}
-
-// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
-func layerDigestsDiffer(a, b []types.BlobInfo) bool {
- if len(a) != len(b) {
- return true
- }
- for i := range a {
- if a[i].Digest != b[i].Digest {
- return true
- }
+ return term.IsTerminal(int(f.Fd()))
}
return false
}
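The only change to isTTY is calling golang.org/x/term directly instead of the older terminal helper; the standalone equivalent of the check is:

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// term.IsTerminal works on raw file descriptors; isTTY merely unwraps the *os.File first.
	fmt.Println("stdout is a TTY:", term.IsTerminal(int(os.Stdout.Fd())))
}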
-
-// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
-// stores the resulting config and manifest to the destination, and returns the stored manifest
-// and its digest.
-func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
- pendingImage := ic.src
- if !ic.noPendingManifestUpdates() {
- if !ic.canModifyManifest {
- return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
- }
- if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
- // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
- // So, this can only happen if we are trying to upload using one of the other MIME type candidates.
- // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
- // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
- // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
- // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
- return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
- }
- pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error creating an updated image manifest")
- }
- pendingImage = pi
- }
- man, _, err := pendingImage.Manifest(ctx)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error reading manifest")
- }
-
- if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
- return nil, "", err
- }
-
- ic.c.Printf("Writing manifest to image destination\n")
- manifestDigest, err := manifest.Digest(man)
- if err != nil {
- return nil, "", err
- }
- if instanceDigest != nil {
- instanceDigest = &manifestDigest
- }
- if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
- return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man))
- }
- return man, manifestDigest, nil
-}
-
-// newProgressPool creates a *mpb.Progress and a cleanup function.
-// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
-func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
- ctx, cancel := context.WithCancel(ctx)
- pool := mpb.NewWithContext(ctx, mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
- return pool, func() {
- cancel()
- pool.Wait()
- }
-}
-
-// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
-// is ioutil.Discard, the progress bar's output will be discarded
-func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
- // shortDigestLen is the length of the digest used for blobs.
- const shortDigestLen = 12
-
- prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
- // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
- maxPrefixLen := len("Copying blob ") + shortDigestLen
- if len(prefix) > maxPrefixLen {
- prefix = prefix[:maxPrefixLen]
- }
-
- // onComplete will replace prefix once the bar/spinner has completed
- onComplete = prefix + " " + onComplete
-
- // Use a normal progress bar when we know the size (i.e., size > 0).
- // Otherwise, use a spinner to indicate that something's happening.
- var bar *mpb.Bar
- if info.Size > 0 {
- bar = pool.AddBar(info.Size,
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- mpb.AppendDecorators(
- decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
- ),
- )
- } else {
- bar = pool.Add(0,
- mpb.NewSpinnerFiller([]string{".", "..", "...", "....", ""}, mpb.SpinnerOnLeft),
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- )
- }
- if c.progressOutput == ioutil.Discard {
- c.Printf("Copying %s %s\n", kind, info.Digest)
- }
- return bar
-}
-
-// copyConfig copies config.json, if any, from src to dest.
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
- srcInfo := src.ConfigInfo()
- if srcInfo.Digest != "" {
- configBlob, err := src.ConfigBlob(ctx)
- if err != nil {
- return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
- }
-
- destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
- progressPool, progressCleanup := c.newProgressPool(ctx)
- defer progressCleanup()
- bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1)
- if err != nil {
- return types.BlobInfo{}, err
- }
- bar.SetTotal(int64(len(configBlob)), true)
- return destInfo, nil
- }()
- if err != nil {
- return err
- }
- if destInfo.Digest != srcInfo.Digest {
- return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
- }
- }
- return nil
-}
-
-// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
-// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
-type diffIDResult struct {
- digest digest.Digest
- err error
-}
-
-// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
-// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef can be nil.
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named) (types.BlobInfo, digest.Digest, error) {
- // If the srcInfo doesn't contain compression information, try to compute it from the
- // MediaType, which was either read from a manifest by way of LayerInfos() or constructed
- // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
- // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
- // which uses the compression information to compute the updated MediaType values.
- // (Sadly UpdatedImage() is documented to not update MediaTypes from
- // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
- //
- // This MIME type → compression mapping belongs in manifest-specific code in our manifest
- // package (but we should preferably replace/change UpdatedImage instead of productizing
- // this workaround).
- if srcInfo.CompressionAlgorithm == nil {
- switch srcInfo.MediaType {
- case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
- srcInfo.CompressionAlgorithm = &compression.Gzip
- case imgspecv1.MediaTypeImageLayerZstd:
- srcInfo.CompressionAlgorithm = &compression.Zstd
- }
- }
-
- cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
- // Diffs are needed if we are encrypting an image or trying to decrypt an image
- diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
-
- // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
- if !diffIDIsNeeded {
- // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
- // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
- // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
- // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
- // Fixing that will probably require passing more information to TryReusingBlob() than the current version of
- // the ImageDestination interface lets us pass in.
- var (
- blobInfo types.BlobInfo
- reused bool
- err error
- )
- // Note: the storage destination optimizes the committing of
- // layers which requires passing the index of the layer.
- // Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.TryReusingBlobOptions{
- Cache: ic.c.blobInfoCache,
- CanSubstitute: ic.canSubstituteBlobs,
- SrcRef: srcRef,
- }
- if enableEarlyCommit {
- options.LayerIndex = &layerIndex
- }
- reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
- } else {
- reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
- }
-
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
- }
- if reused {
- logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
- bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
- bar.SetTotal(0, true)
-
- // Throw an event that the layer has been skipped
- if ic.c.progress != nil && ic.c.progressInterval > 0 {
- ic.c.progress <- types.ProgressProperties{
- Event: types.ProgressEventSkipped,
- Artifact: srcInfo,
- }
- }
-
- // If the reused blob has the same digest as the one we asked for, but
- // the transport didn't/couldn't supply compression info, fill it in based
- // on what we know from the srcInfos we were given.
- // If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will
- // call UpdateLayerInfos(), which uses this information to compute the
- // MediaType value for the updated layer infos, and if the transport
- // didn't pass the information along from its input to its output, then
- // it can derive the MediaType incorrectly.
- if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil {
- blobInfo.CompressionOperation = srcInfo.CompressionOperation
- blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm
- }
- return blobInfo, cachedDiffID, nil
- }
- }
-
- // Fallback: copy the layer, computing the diffID if we need to do so
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
- }
- defer srcStream.Close()
-
- bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
-
- blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex)
- if err != nil {
- return types.BlobInfo{}, "", err
- }
-
- diffID := cachedDiffID
- if diffIDIsNeeded {
- select {
- case <-ctx.Done():
- return types.BlobInfo{}, "", ctx.Err()
- case diffIDResult := <-diffIDChan:
- if diffIDResult.err != nil {
- return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
- }
- logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
- // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
- ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
- diffID = diffIDResult.digest
- }
- }
-
- bar.SetTotal(srcInfo.Size, true)
- return blobInfo, diffID, nil
-}
-
-// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
-// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps (de/re/)compressing the stream,
-// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
-func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, <-chan diffIDResult, error) {
- var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
- var diffIDChan chan diffIDResult
-
- err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
- if diffIDIsNeeded {
- diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
- pipeReader, pipeWriter := io.Pipe()
- defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
- _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
- }()
-
- getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
- // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
- // reading from the pipe has failed, we don’t really care.
- // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
- // the return value includes an error indication, which we do check.
- //
- // If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
- // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
- go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
- return pipeWriter
- }
- }
-
- blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex) // Sets err to nil on success
- return blobInfo, diffIDChan, err
- // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
-}
-
-// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
-func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
- result := diffIDResult{
- digest: "",
- err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
- }
- defer func() { dest <- result }()
- defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
-
- result.digest, result.err = computeDiffID(layerStream, decompressor)
-}
-
-// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
-func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
- if decompressor != nil {
- s, err := decompressor(stream)
- if err != nil {
- return "", err
- }
- defer s.Close()
- stream = s
- }
-
- return digest.Canonical.FromReader(stream)
-}
-
-// errorAnnotationReader wraps the io.Reader passed to PutBlob to annotate errors that happen during reads.
-// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
-type errorAnnotationReader struct {
- reader io.Reader
-}
-
-// Read annotates any error that happens during a read
-func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
- n, err = r.reader.Read(b)
- if err != io.EOF {
- return n, errors.Wrapf(err, "error happened during read")
- }
- return n, err
-}
-
-// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
-// perhaps (de/re/)compressing it if canModifyBlob,
-// and returns a complete blobInfo of the copied blob.
-func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, error) {
- if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
- canModifyBlob = false
- }
-
- // The copying happens through a pipeline of connected io.Readers.
- // === Input: srcStream
-
- // === Process input through digestingReader to validate against the expected digest.
- // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
- // use a separate validation failure indicator.
- // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
- // dest.PutBlob may detect that the layer already exists, in which case we don't
- // read stream to the end, and validation does not happen.
- digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
- }
- var destStream io.Reader = digestingReader
-
- // === Decrypt the stream, if required.
- var decrypted bool
- if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
- newDesc := imgspecv1.Descriptor{
- Annotations: srcInfo.Annotations,
- }
-
- var d digest.Digest
- destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
- }
-
- srcInfo.Digest = d
- srcInfo.Size = -1
- for k := range srcInfo.Annotations {
- if strings.HasPrefix(k, "org.opencontainers.image.enc") {
- delete(srcInfo.Annotations, k)
- }
- }
- decrypted = true
- }
-
- // === Detect compression of the input stream.
- // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
- compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
- }
- isCompressed := decompressor != nil
- if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
- logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
- }
-
- // === Update progress bars
- destStream = bar.ProxyReader(destStream)
-
- // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
- var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
- if getOriginalLayerCopyWriter != nil {
- destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
- originalLayerReader = destStream
- }
-
- // === Deal with layer compression/decompression if necessary
- // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
- // short-circuit conditions
- var inputInfo types.BlobInfo
- var compressionOperation types.LayerCompression
- uploadCompressionFormat := &c.compressionFormat
- srcCompressorName := internalblobinfocache.Uncompressed
- if isCompressed {
- srcCompressorName = compressionFormat.Name()
- }
- var uploadCompressorName string
- if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
- // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
- logrus.Debugf("Using original blob without modification for encrypted blob")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- srcCompressorName = internalblobinfocache.UnknownCompression
- uploadCompressorName = internalblobinfocache.UnknownCompression
- uploadCompressionFormat = nil
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
- logrus.Debugf("Compressing blob on the fly")
- compressionOperation = types.Compress
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
- // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
- // we don’t care.
- go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() {
- // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
- // re-compressed using the desired format.
- logrus.Debugf("Blob will be converted")
-
- compressionOperation = types.PreserveOriginal
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
-
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter
-
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
- logrus.Debugf("Blob will be decompressed")
- compressionOperation = types.Decompress
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
- destStream = s
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = internalblobinfocache.Uncompressed
- uploadCompressionFormat = nil
- } else {
- // PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
- logrus.Debugf("Using original blob without modification")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- uploadCompressorName = srcCompressorName
- // Remember if the original blob was compressed, and if so how, so that if
- // LayerInfosForCopy() returned something that differs from what was in the
- // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
- // it will be able to correctly derive the MediaType for the copied blob.
- if isCompressed {
- uploadCompressionFormat = &compressionFormat
- } else {
- uploadCompressionFormat = nil
- }
- }
-
- // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
- var (
- encrypted bool
- finalizer ocicrypt.EncryptLayerFinalizer
- )
- if toEncrypt {
- if decrypted {
- return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
- }
-
- if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
- var annotations map[string]string
- if !decrypted {
- annotations = srcInfo.Annotations
- }
- desc := imgspecv1.Descriptor{
- MediaType: srcInfo.MediaType,
- Digest: srcInfo.Digest,
- Size: srcInfo.Size,
- Annotations: annotations,
- }
-
- s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "Error encrypting blob %s", srcInfo.Digest)
- }
-
- destStream = s
- finalizer = fin
- inputInfo.Digest = ""
- inputInfo.Size = -1
- encrypted = true
- }
- }
-
- // === Report progress using the c.progress channel, if required.
- if c.progress != nil && c.progressInterval > 0 {
- progressReader := newProgressReader(
- destStream,
- c.progress,
- c.progressInterval,
- srcInfo,
- )
- defer progressReader.reportDone()
- destStream = progressReader
- }
-
- // === Finally, send the layer stream to dest.
- var uploadedInfo types.BlobInfo
- // Note: the storage destination optimizes the committing of layers
- // which requires passing the index of the layer. Hence, we need to
- // special case and cast.
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.PutBlobOptions{
- Cache: c.blobInfoCache,
- IsConfig: isConfig,
- }
- if !isConfig && enableEarlyCommit {
- options.LayerIndex = &layerIndex
- }
- uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
- } else {
- uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
- }
-
- uploadedInfo.Annotations = srcInfo.Annotations
-
- uploadedInfo.CompressionOperation = compressionOperation
- // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
- uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
- if decrypted {
- uploadedInfo.CryptoOperation = types.Decrypt
- } else if encrypted {
- encryptAnnotations, err := finalizer()
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
- }
- uploadedInfo.CryptoOperation = types.Encrypt
- if uploadedInfo.Annotations == nil {
- uploadedInfo.Annotations = map[string]string{}
- }
- for k, v := range encryptAnnotations {
- uploadedInfo.Annotations[k] = v
- }
- }
-
- // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
- // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
- // So, read everything from originalLayerReader, which will cause the rest to be
- // sent there if we are not already at EOF.
- if getOriginalLayerCopyWriter != nil {
- logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
- _, err := io.Copy(ioutil.Discard, originalLayerReader)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
- }
- }
-
- if digestingReader.validationFailed { // Coverage: This should never happen.
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
- }
- if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
- }
- if digestingReader.validationSucceeded {
- // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
- // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
- // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
- // (because inputInfo.Digest == "", this must have been computed afresh).
- switch compressionOperation {
- case types.PreserveOriginal:
- break // Do nothing, we have only one digest and we might not have even verified it.
- case types.Compress:
- c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
- case types.Decompress:
- c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
- default:
- return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
- }
- if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
- }
- if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
- }
- }
- return uploadedInfo, nil
-}
-
-// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
- err := errors.New("Internal error: unexpected panic in compressGoroutine")
- defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
- _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
- }()
-
- compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
- if err != nil {
- return
- }
- defer compressor.Close()
-
- buf := make([]byte, compressionBufferSize)
-
- _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
-}
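Much of the code deleted above reappears in refactored form in the new per-step files below. Its central idea was a single pipeline of chained io.Readers, with recompression running concurrently behind an io.Pipe, exactly as in the "Compressing blob on the fly" branch. A minimal standalone illustration of that pattern (gzip chosen for simplicity):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	var src io.Reader = strings.NewReader("layer data")

	// Compress on the fly: the goroutine writes compressed bytes into the pipe
	// while the consumer (dest.PutBlob, in the real code) reads from the other end.
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if err == nil {
			err = gz.Close()
		}
		pw.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()

	compressed, err := io.ReadAll(pr)
	fmt.Println(len(compressed), err)
}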
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
new file mode 100644
index 000000000..4c6ba82ee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -0,0 +1,62 @@
+package copy
+
+import (
+ "fmt"
+ "hash"
+ "io"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type digestingReader struct {
+ source io.Reader
+ digester digest.Digester
+ hash hash.Hash
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
+}
+
+// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
+ }
+ digester = digestAlgorithm.Digester()
+
+ return &digestingReader{
+ source: source,
+ digester: digester,
+ hash: digester.Hash(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.hash.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ d.validationSucceeded = true
+ }
+ return n, err
+}
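A test-style sketch of the reader's contract, written as if it lived alongside the code above in the copy package (newDigestingReader and validationSucceeded are the unexported names from this file; the usual fmt, io and strings imports plus the go-digest module are assumed):

// Draining a digestingReader either validates the stream or fails with the mismatch error.
func Example_digestingReader() {
	payload := "hello"
	dr, err := newDigestingReader(strings.NewReader(payload), digest.FromString(payload))
	if err != nil {
		panic(err) // only happens for an invalid or unsupported expectedDigest
	}
	if _, err := io.Copy(io.Discard, dr); err != nil {
		panic(err) // a corrupted stream would surface "Digest did not match" here
	}
	fmt.Println(dr.validationSucceeded)
	// Output: true
}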
diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go
deleted file mode 100644
index a18d6f151..000000000
--- a/vendor/github.com/containers/image/v5/copy/encrypt.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package copy
-
-import (
- "strings"
-
- "github.com/containers/image/v5/types"
-)
-
-// isOciEncrypted returns a bool indicating if a mediatype is encrypted
-// This function will be moved to be part of OCI spec when adopted.
-func isOciEncrypted(mediatype string) bool {
- return strings.HasSuffix(mediatype, "+encrypted")
-}
-
-// isEncrypted checks if an image is encrypted
-func isEncrypted(i types.Image) bool {
- layers := i.LayerInfos()
- for _, l := range layers {
- if isOciEncrypted(l.MediaType) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 000000000..b5a88a914
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,138 @@
+package copy
+
+import (
+ "fmt"
+ "maps"
+ "slices"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
+ return isOciEncrypted(l.MediaType)
+ })
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ // DecryptLayer supposedly returns a digest of the decrypted stream.
+ // In practice, that value is never set in the current implementation.
+ // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
+ // i.e. it doesn’t authenticate the origin of the metadata in any way.
+ reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+ return strings.HasPrefix(k, "org.opencontainers.image.enc")
+ })
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, encryptAnnotations)
+ return nil
+}
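The annotation bookkeeping above leans on Go 1.21's maps package: DeleteFunc strips the org.opencontainers.image.enc* keys after decryption, and Copy merges the finalizer's keys after encryption. A standalone illustration of just those two calls (the values are invented):

package main

import (
	"fmt"
	"maps"
	"strings"
)

func main() {
	annotations := map[string]string{
		"org.opencontainers.image.enc.keys.jwe": "old-wrapped-key",
		"org.opencontainers.image.title":        "demo",
	}

	// Decryption path: drop the encryption metadata, keep everything else.
	maps.DeleteFunc(annotations, func(k, _ string) bool {
		return strings.HasPrefix(k, "org.opencontainers.image.enc")
	})

	// Encryption path: merge whatever the layer finalizer returned.
	maps.Copy(annotations, map[string]string{"org.opencontainers.image.enc.keys.jwe": "new-wrapped-key"})

	fmt.Println(annotations) // map[org.opencontainers.image.enc.keys.jwe:new-wrapped-key org.opencontainers.image.title:demo]
}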
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index 0c0164cbf..97837f9f2 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -2,11 +2,17 @@ package copy
import (
"context"
+ "errors"
+ "fmt"
+ "slices"
"strings"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -15,10 +21,13 @@ import (
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+// allManifestMIMETypes lists all possible manifest MIME types.
+var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}
+
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
list []string
- included map[string]struct{}
+ included *set.Set[string]
}
// newOrderedSet creates a correctly initialized orderedSet.
@@ -26,44 +35,114 @@ type orderedSet struct {
func newOrderedSet() *orderedSet {
return &orderedSet{
list: []string{},
- included: map[string]struct{}{},
+ included: set.New[string](),
}
}
// append adds s to the end of os, only if it is not included already.
func (os *orderedSet) append(s string) {
- if _, ok := os.included[s]; !ok {
+ if !os.included.Contains(s) {
os.list = append(os.list, s)
- os.included[s] = struct{}{}
+ os.included.Add(s)
}
}
-// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
-// Note that the conversion will only happen later, through ic.src.UpdatedImage
-// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
-// and a list of other possible alternatives, in order.
-func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
- _, srcType, err := ic.src.Manifest(ctx)
- if err != nil { // This should have been cached?!
- return "", nil, errors.Wrap(err, "Error reading manifest")
- }
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
+type manifestConversionPlan struct {
+ // The preferred manifest MIME type (whether we are converting to it or using it unmodified).
+ // We compute this only to show it in error messages; without having to add this context
+ // in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredMIMEType string
+ preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
+ otherMIMETypeCandidates []string // Other possible alternatives, in order
+}
+
+// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
+func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
+ srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
- logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
+ logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
- if forceManifestMIMEType != "" {
- destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
+ destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
+ if in.forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
}
-
- if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
- return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+ if len(destSupportedManifestMIMETypes) == 0 {
+ destSupportedManifestMIMETypes = allManifestMIMETypes
}
- supportedByDest := map[string]struct{}{}
+
+ restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
+ supportedByDest := set.New[string]()
for _, t := range destSupportedManifestMIMETypes {
- if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
- supportedByDest[t] = struct{}{}
+ if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
+ continue
+ }
+ if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) {
+ continue
+ }
+ supportedByDest.Add(t)
+ }
+ if supportedByDest.Empty() {
+ if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes
+ return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
+ }
+ // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved.
+
+ // destSupportedManifestMIMETypes has three possible origins:
+ if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
+ switch {
+ case in.requiresOCIEncryption && restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both",
+ in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+ case in.requiresOCIEncryption:
+ return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
+ in.forceManifestMIMEType)
+ case restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it",
+ in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+ default:
+ return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason")
+ }
+ }
+ if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestMIMETypes
+ if !restrictiveCompressionRequired {
+ // Coverage: This should never happen.
+ // If we have not rejected for compression reasons, we must have rejected due to encryption, but
+ // allManifestMIMETypes includes OCI, which supports encryption.
+ return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+ }
+ // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
+ }
+ // 3. destination accepts a restricted list of mime types
+ destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ")
+ switch {
+ case in.requiresOCIEncryption && restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both",
+ in.requestedCompressionFormat.Name(), destMIMEList)
+ case in.requiresOCIEncryption:
+ return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
+ destMIMEList)
+ case restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it",
+ in.requestedCompressionFormat.Name(), destMIMEList)
+ default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired
+ return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm")
}
}
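
The hunk above narrows the destination's supported MIME types to those compatible with both OCI encryption and any restrictive compression request, and, when nothing survives, reports exactly which constraint was responsible. A minimal standalone sketch of that filter-then-diagnose pattern; supportsEncryption and supportsCompression are hypothetical stand-ins for manifest.MIMETypeSupportsEncryption and internalManifest.MIMETypeSupportsCompressionAlgorithm:

package main

import (
	"errors"
	"fmt"
)

// filterTypes keeps only the candidates satisfying the active constraints and,
// when nothing survives, names the constraint combination that caused it.
func filterTypes(candidates []string, needEncryption, needCompression bool,
	supportsEncryption, supportsCompression func(string) bool) ([]string, error) {
	kept := []string{}
	for _, t := range candidates {
		if needEncryption && !supportsEncryption(t) {
			continue
		}
		if needCompression && !supportsCompression(t) {
			continue
		}
		kept = append(kept, t)
	}
	if len(kept) == 0 {
		switch {
		case needEncryption && needCompression:
			return nil, errors.New("no candidate type supports both encryption and the requested compression")
		case needEncryption:
			return nil, errors.New("no candidate type supports encryption")
		default:
			return nil, errors.New("no candidate type supports the requested compression")
		}
	}
	return kept, nil
}

func main() {
	yes := func(string) bool { return true }
	no := func(string) bool { return false }
	fmt.Println(filterTypes([]string{"a", "b"}, true, false, yes, no)) // [a b] <nil>
	fmt.Println(filterTypes([]string{"a", "b"}, true, false, no, yes)) // [] no candidate type supports encryption
}
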
@@ -76,41 +155,48 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
prioritizedTypes := newOrderedSet()
// First of all, prefer to keep the original manifest unmodified.
- if _, ok := supportedByDest[srcType]; ok {
+ if supportedByDest.Contains(srcType) {
prioritizedTypes.append(srcType)
}
- if !ic.canModifyManifest {
- // We could also drop the !ic.canModifyManifest check and have the caller
+ if in.cannotModifyManifestReason != "" {
+ // We could also drop this check and have the caller
// make the choice; it is already doing that to an extent, to improve error
- // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion”
+ // messages. But it is nice to hide the “if we can't modify, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...")
- return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+ return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
}
// Then use our list of preferred types.
for _, t := range preferredManifestMIMETypes {
- if _, ok := supportedByDest[t]; ok {
+ if supportedByDest.Contains(t) {
prioritizedTypes.append(t)
}
}
// Finally, try anything else the destination supports.
for _, t := range destSupportedManifestMIMETypes {
- prioritizedTypes.append(t)
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
}
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
- if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
- return "", nil, errors.New("Internal error: no candidate MIME types")
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
+ return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
}
- preferredType := prioritizedTypes.list[0]
- if preferredType != srcType {
- ic.manifestUpdates.ManifestMIMEType = preferredType
- } else {
+ res := manifestConversionPlan{
+ preferredMIMEType: prioritizedTypes.list[0],
+ otherMIMETypeCandidates: prioritizedTypes.list[1:],
+ }
+ res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+ if !res.preferredMIMETypeNeedsConversion {
logrus.Debugf("... will first try using the original manifest unmodified")
}
- return preferredType, prioritizedTypes.list[1:], nil
+ return res, nil
}
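
The candidate ordering above depends on newOrderedSet deduplicating while preserving insertion order: the unmodified source type first, then the preferred types, then anything else the destination accepts. A minimal sketch of an append-if-absent ordered set with those assumed semantics (an assumption; the real type lives elsewhere in this package):

type orderedSet struct {
	list     []string
	included map[string]struct{}
}

func newOrderedSet() *orderedSet {
	return &orderedSet{included: map[string]struct{}{}}
}

// append records v only on first sight, preserving insertion order.
func (s *orderedSet) append(v string) {
	if _, ok := s.included[v]; !ok {
		s.list = append(s.list, v)
		s.included[v] = struct{}{}
	}
}
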
// isMultiImage returns true if img is a list of images
@@ -137,30 +223,26 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
if forcedListMIMEType != "" {
destSupportedMIMETypes = []string{forcedListMIMEType}
}
- var selectedType string
- var otherSupportedTypes []string
- for i := range destSupportedMIMETypes {
- // The second priority is the first member of the list of acceptable types that is a list,
- // but keep going in case current type occurs later in the list.
- if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
- selectedType = destSupportedMIMETypes[i]
- }
- // The first priority is the current type, if it's in the list, since that lets us avoid a
- // conversion that isn't strictly necessary.
- if destSupportedMIMETypes[i] == currentListMIMEType {
- selectedType = destSupportedMIMETypes[i]
- }
+
+ prioritizedTypes := newOrderedSet()
+ // The first priority is the current type, if it's in the list, since that lets us avoid a
+ // conversion that isn't strictly necessary.
+ if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
+ prioritizedTypes.append(currentListMIMEType)
}
// Pick out the other list types that we support.
- for i := range destSupportedMIMETypes {
- if selectedType != destSupportedMIMETypes[i] && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
- otherSupportedTypes = append(otherSupportedTypes, destSupportedMIMETypes[i])
+ for _, t := range destSupportedMIMETypes {
+ if manifest.MIMETypeIsMultiImage(t) {
+ prioritizedTypes.append(t)
}
}
- logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
- if selectedType == "" {
- return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+
+ logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
+ if len(prioritizedTypes.list) == 0 {
+ return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
}
+ selectedType := prioritizedTypes.list[0]
+ otherSupportedTypes := prioritizedTypes.list[1:]
if selectedType != currentListMIMEType {
logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
} else {
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go
new file mode 100644
index 000000000..009a067ce
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/multiple.go
@@ -0,0 +1,354 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type instanceCopyKind int
+
+const (
+ instanceCopyCopy instanceCopyKind = iota
+ instanceCopyClone
+)
+
+type instanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyCopy`
+ copyForceCompressionFormat bool
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyClone`
+ cloneArtifactType string
+ cloneCompressionVariant OptionCompressionVariant
+ clonePlatform *imgspecv1.Platform
+ cloneAnnotations map[string]string
+}
+
+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+ architecture string
+ os string
+ osVersion string
+ osFeatures string
+ variant string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+ if platform == nil {
+ return platformComparable{}
+ }
+ osFeatures := slices.Clone(platform.OSFeatures)
+ sort.Strings(osFeatures)
+ return platformComparable{architecture: platform.Architecture,
+ os: platform.OS,
+ // Strictly speaking, this is ambiguous, since fields of OSFeatures can contain a ','. Probably good enough for now.
+ osFeatures: strings.Join(osFeatures, ","),
+ osVersion: platform.OSVersion,
+ variant: platform.Variant,
+ }
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+ res := make(map[platformComparable]*set.Set[string])
+ for _, instanceDigest := range instanceDigests {
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ platformSet, ok := res[platform]
+ if !ok {
+ platformSet = set.New[string]()
+ res[platform] = platformSet
+ }
+ platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
+ }
+ return res, nil
+}
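
Go map keys must be comparable, so platformComparable flattens the slice-valued OSFeatures into a sorted, comma-joined string before the platform is used as a key; platformCompressionMap then accumulates a set of compression names per platform. A self-contained sketch of both ideas (field names are illustrative):

package main

import (
	"fmt"
	"slices"
	"sort"
	"strings"
)

type platformKey struct{ os, arch, features string }

// keyOf builds a comparable key; sorting makes the key order-insensitive.
func keyOf(os, arch string, features []string) platformKey {
	f := slices.Clone(features)
	sort.Strings(f)
	return platformKey{os: os, arch: arch, features: strings.Join(f, ",")}
}

func main() {
	compressions := map[platformKey]map[string]struct{}{} // platform -> set of algorithm names
	add := func(k platformKey, algo string) {
		s, ok := compressions[k]
		if !ok {
			s = map[string]struct{}{}
			compressions[k] = s
		}
		s[algo] = struct{}{}
	}
	k := keyOf("linux", "amd64", []string{"b", "a"})
	add(k, "gzip")
	add(keyOf("linux", "amd64", []string{"a", "b"}), "zstd") // same key as k
	fmt.Println(len(compressions), len(compressions[k]))     // 1 2
}
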
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+ for _, option := range input {
+ _, err := compression.AlgorithmByName(option.Algorithm.Name())
+ if err != nil {
+ return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+ }
+ }
+ return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
+ res := []instanceCopy{}
+ if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+ // The list can already contain a compressed instance for a compression selected in `EnsureCompressionVariantsExist`.
+ // It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+ // EnsureCompressionVariantsExist asks for an instance with some compression,
+ // an instance with that compression already exists, but is not included in options.Instances.
+ // We might define the semantics and implement this in the future.
+ return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+ }
+ err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
+ if err != nil {
+ return res, err
+ }
+ compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
+ if err != nil {
+ return nil, err
+ }
+ for i, instanceDigest := range instanceDigests {
+ if options.ImageListSelection == CopySpecificImages &&
+ !slices.Contains(options.Instances, instanceDigest) {
+ logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ continue
+ }
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, instanceCopy{
+ op: instanceCopyCopy,
+ sourceDigest: instanceDigest,
+ copyForceCompressionFormat: forceCompressionFormat,
+ })
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ compressionList := compressionsByPlatform[platform]
+ for _, compressionVariant := range options.EnsureCompressionVariantsExist {
+ if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
+ res = append(res, instanceCopy{
+ op: instanceCopyClone,
+ sourceDigest: instanceDigest,
+ cloneArtifactType: instanceDetails.ReadOnly.ArtifactType,
+ cloneCompressionVariant: compressionVariant,
+ clonePlatform: instanceDetails.ReadOnly.Platform,
+ cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations),
+ })
+ // add current compression to the list so that we don’t create duplicate clones
+ compressionList.Add(compressionVariant.Algorithm.Name())
+ }
+ }
+ }
+ return res, nil
+}
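
Note how the loop above adds each planned clone's compression name back into the per-platform set it consults, so several instances sharing a platform never schedule duplicate clones. The pattern in isolation:

package main

import "fmt"

func main() {
	have := map[string]struct{}{"gzip": {}} // variants that already exist
	var planned []string
	for _, algo := range []string{"zstd", "zstd", "gzip"} {
		if _, ok := have[algo]; !ok {
			planned = append(planned, algo)
			have[algo] = struct{}{} // record immediately so the next request is a no-op
		}
	}
	fmt.Println(planned) // [zstd]
}
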
+
+// copyMultipleImages copies some or all of an image list's instances, using
+// c.policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
+ // Parse the list and get a copy of the original value after it's re-encoded.
+ manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading manifest list: %w", err)
+ }
+ originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
+ }
+ updatedList := originalList.CloneInternal()
+
+ sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel,
+ "Getting image list signatures",
+ "Checking if image list destination supports signatures")
+ if err != nil {
+ return nil, err
+ }
+
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ return nil, errors.New("Digest of source image's manifest would not match destination reference")
+ }
+ }
+ }
+
+ // Determine if we're allowed to modify the manifest list.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copySingleImage.
+ cannotModifyManifestListReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestListReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestListReason = "Destination specifies a digest"
+ }
+ if c.options.PreserveDigests {
+ cannotModifyManifestListReason = "Instructed to preserve digests"
+ }
+
+ // Determine if we'll need to convert the manifest list to a different format.
+ forceListMIMEType := c.options.ForceManifestMIMEType
+ switch forceListMIMEType {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ forceListMIMEType = manifest.DockerV2ListMediaType
+ case imgspecv1.MediaTypeImageManifest:
+ forceListMIMEType = imgspecv1.MediaTypeImageIndex
+ }
+ selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
+ if err != nil {
+ return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
+ }
+ if selectedListType != originalList.MIMEType() {
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
+ }
+ }
+
+ // Copy each image, or just the ones we want to copy, in turn.
+ instanceDigests := updatedList.Instances()
+ instanceEdits := []internalManifest.ListEdit{}
+ instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options)
+ if err != nil {
+ return nil, fmt.Errorf("preparing instances for copy: %w", err)
+ }
+ c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests))
+ for i, instance := range instanceCopyList {
+ // Update instances to be edited by their `ListOperation` and
+ // populate necessary fields.
+ switch instance.op {
+ case instanceCopyCopy:
+ logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+ c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+ updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
+ if err != nil {
+ return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
+ }
+ // Record the result of a possible conversion here.
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ ListOperation: internalManifest.ListOpUpdate,
+ UpdateOldDigest: instance.sourceDigest,
+ UpdateDigest: updated.manifestDigest,
+ UpdateSize: int64(len(updated.manifest)),
+ UpdateCompressionAlgorithms: updated.compressionAlgorithms,
+ UpdateMediaType: updated.manifestMIMEType})
+ case instanceCopyClone:
+ logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+ c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+ updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{
+ requireCompressionFormatMatch: true,
+ compressionFormat: &instance.cloneCompressionVariant.Algorithm,
+ compressionLevel: instance.cloneCompressionVariant.Level})
+ if err != nil {
+ return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
+ }
+ // Record the result of a possible conversion here.
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ ListOperation: internalManifest.ListOpAdd,
+ AddDigest: updated.manifestDigest,
+ AddSize: int64(len(updated.manifest)),
+ AddMediaType: updated.manifestMIMEType,
+ AddArtifactType: instance.cloneArtifactType,
+ AddPlatform: instance.clonePlatform,
+ AddAnnotations: instance.cloneAnnotations,
+ AddCompressionAlgorithms: updated.compressionAlgorithms,
+ })
+ default:
+ return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
+ }
+ }
+
+ // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
+ if err = updatedList.EditInstances(instanceEdits); err != nil {
+ return nil, fmt.Errorf("updating manifest list: %w", err)
+ }
+
+ // Iterate through supported list types, preferred format first.
+ c.Printf("Writing manifest list to image destination\n")
+ var errs []string
+ for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
+ var attemptedList internalManifest.ListPublic = updatedList
+
+ logrus.Debugf("Trying to use manifest list type %s…", thisListType)
+
+ // Perform the list conversion, if we need one.
+ if thisListType != updatedList.MIMEType() {
+ attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
+ if err != nil {
+ return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
+ }
+ }
+
+ // Check if the updates or a type conversion meaningfully changed the list of images
+ // by serializing them both so that we can compare them.
+ attemptedManifestList, err := attemptedList.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
+ }
+ originalManifestList, err := originalList.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
+ }
+
+ // If we can't just use the original value, but we have to change it, flag an error.
+ if !bytes.Equal(attemptedManifestList, originalManifestList) {
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
+ }
+ logrus.Debugf("Manifest list has been updated")
+ } else {
+ // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
+ attemptedManifestList = manifestList
+ }
+
+ // Save the manifest list.
+ err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
+ if err != nil {
+ logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
+ continue
+ }
+ errs = nil
+ manifestList = attemptedManifestList
+ break
+ }
+ if errs != nil {
+ return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+
+ // Sign the manifest list.
+ newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity)
+ if err != nil {
+ return nil, err
+ }
+ sigs = append(slices.Clone(sigs), newSigs...)
+
+ c.Printf("Storing list signatures\n")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
+ return nil, fmt.Errorf("writing signatures: %w", err)
+ }
+
+ return manifestList, nil
+}
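
The upload loop above uses a pattern that copySingleImage repeats for manifests: try each candidate format in priority order, collect per-format failures, and drop them all on the first success. In isolation, with upload as a hypothetical callback:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// tryFormats attempts upload with each candidate in order; errs doubles as an
// "everything failed" flag, exactly like the errs slice in the loop above.
func tryFormats(candidates []string, upload func(string) error) error {
	var errs []string
	for _, format := range candidates {
		if err := upload(format); err != nil {
			errs = append(errs, fmt.Sprintf("%s(%v)", format, err))
			continue
		}
		errs = nil // success: forget the earlier failures
		break
	}
	if errs != nil {
		return fmt.Errorf("upload failed, attempted the following formats: %s", strings.Join(errs, ", "))
	}
	return nil
}

func main() {
	err := tryFormats([]string{"oci-index", "docker-list"}, func(format string) error {
		if format == "oci-index" {
			return errors.New("rejected")
		}
		return nil
	})
	fmt.Println(err) // <nil>, because the second format succeeded
}
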
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
new file mode 100644
index 000000000..59f41d216
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -0,0 +1,177 @@
+package copy
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "time"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/vbauerster/mpb/v8"
+ "github.com/vbauerster/mpb/v8/decor"
+)
+
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool is no longer being updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ current := decor.SizeB1024(s.Current)
+ total := decor.SizeB1024(s.Total)
+ refill := decor.SizeB1024(s.Refill)
+ if s.Total == 0 {
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
+ }
+ // If we didn't do a partial fetch, don't output a distracting "(skipped: 0.0b = 0.00%)".
+ if s.Refill == 0 {
+ return fmt.Sprintf("%.1f / %.1f", current, total)
+ }
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return nil, err
+ }
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ if partial {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.Any(customPartialBlobDecorFunc),
+ ),
+ )
+ } else {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+ decor.Name(" | "),
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ } else {
+ bar = pool.New(0,
+ mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ return &progressBar{
+ Bar: bar,
+ originalSize: info.Size,
+ }, nil
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+ if c.progressOutput == io.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+ if bar.originalSize > 0 {
+ // We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+ // after a progress bar is created with a definite total.
+ bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+ } else {
+ // -1 = unknown size
+ // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+ // size (possible at least in theory), in mpb, zero-sized progress bars are treated
+ // as unknown size, in particular they are not configured to be marked as
+ // complete on bar.Current() reaching bar.total (because that would happen already
+ // when creating the progress bar).
+ // That means that we are both _allowed_ to call SetTotal, and we _have to_.
+ bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+ }
+}
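
A runnable illustration of the two completion paths, using only mpb/v8 calls that already appear in this file (New, AddBar, SetCurrent, SetTotal); the sizes are arbitrary:

package main

import "github.com/vbauerster/mpb/v8"

func main() {
	pool := mpb.New(mpb.WithWidth(40))

	known := pool.AddBar(100) // definite total: reaching it completes the bar
	known.SetCurrent(100)

	unknown := pool.AddBar(0)  // zero total: mpb treats the size as unknown
	unknown.SetTotal(-1, true) // total < 0: set the total to current and mark complete

	pool.Wait() // safe, both bars are complete
}
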
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *progressBar // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ start := time.Now()
+ rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
+ if err == nil {
+ total := int64(0)
+ for _, c := range chunks {
+ // do not update the progress bar if there is a chunk with unknown length.
+ if c.Length == math.MaxUint64 {
+ return rc, errs, err
+ }
+ total += int64(c.Length)
+ }
+ s.bar.EwmaIncrInt64(total, time.Since(start))
+ }
+ return rc, errs, err
+}
diff --git a/vendor/github.com/containers/image/v5/copy/progress_channel.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go
new file mode 100644
index 000000000..d5e9e09bd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go
@@ -0,0 +1,79 @@
+package copy
+
+import (
+ "io"
+ "time"
+
+ "github.com/containers/image/v5/types"
+)
+
+// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
+type progressReader struct {
+ source io.Reader
+ channel chan<- types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastUpdate time.Time
+ offset uint64
+ offsetUpdate uint64
+}
+
+// newProgressReader creates a new progress reader for:
+// `source`: The source when internally reading bytes
+// `channel`: The reporter channel to which the progress will be sent
+// `interval`: The update interval to indicate how often the progress should update
+// `artifact`: The blob metadata which is currently being progressed
+func newProgressReader(
+ source io.Reader,
+ channel chan<- types.ProgressProperties,
+ interval time.Duration,
+ artifact types.BlobInfo,
+) *progressReader {
+ // The progress reader constructor informs the progress channel
+ // that a new artifact will be read
+ channel <- types.ProgressProperties{
+ Event: types.ProgressEventNewArtifact,
+ Artifact: artifact,
+ }
+ return &progressReader{
+ source: source,
+ channel: channel,
+ interval: interval,
+ artifact: artifact,
+ lastUpdate: time.Now(),
+ offset: 0,
+ offsetUpdate: 0,
+ }
+}
+
+// reportDone signals on the internal channel that reading has finished
+func (r *progressReader) reportDone() {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventDone,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+}
+
+// Read continuously reads bytes into the progress reader and reports the
+// status via the internal channel
+func (r *progressReader) Read(p []byte) (int, error) {
+ n, err := r.source.Read(p)
+ r.offset += uint64(n)
+ r.offsetUpdate += uint64(n)
+
+ // Fire the progress reader in the provided interval
+ if time.Since(r.lastUpdate) > r.interval {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventRead,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+ r.lastUpdate = time.Now()
+ r.offsetUpdate = 0
+ }
+ return n, err
+}
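
The reader above combines two ideas: count every byte on the hot path, but emit a channel event only when the interval has elapsed, so progress reporting cannot dominate small reads. A self-contained sketch of that throttling shape (its types are stand-ins, not this package's):

package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

// throttledReader counts bytes on every Read but reports at most once per interval.
type throttledReader struct {
	src        io.Reader
	events     chan<- uint64 // receives the running offset
	interval   time.Duration
	lastUpdate time.Time
	offset     uint64
}

func (r *throttledReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	r.offset += uint64(n)
	if time.Since(r.lastUpdate) > r.interval {
		r.events <- r.offset
		r.lastUpdate = time.Now()
	}
	return n, err
}

func main() {
	events := make(chan uint64, 16) // buffered so Read never blocks here
	r := &throttledReader{src: strings.NewReader("example data"), events: events, lastUpdate: time.Now()}
	_, _ = io.Copy(io.Discard, r)
	close(events)
	for offset := range events {
		fmt.Println("bytes read so far:", offset)
	}
}
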
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go
deleted file mode 100644
index 0761065a2..000000000
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package copy
-
-import (
- "io"
- "time"
-
- "github.com/containers/image/v5/types"
-)
-
-// progressReader is a reader that reports its progress on an interval.
-type progressReader struct {
- source io.Reader
- channel chan<- types.ProgressProperties
- interval time.Duration
- artifact types.BlobInfo
- lastUpdate time.Time
- offset uint64
- offsetUpdate uint64
-}
-
-// newProgressReader creates a new progress reader for:
-// `source`: The source when internally reading bytes
-// `channel`: The reporter channel to which the progress will be sent
-// `interval`: The update interval to indicate how often the progress should update
-// `artifact`: The blob metadata which is currently being progressed
-func newProgressReader(
- source io.Reader,
- channel chan<- types.ProgressProperties,
- interval time.Duration,
- artifact types.BlobInfo,
-) *progressReader {
- // The progress reader constructor informs the progress channel
- // that a new artifact will be read
- channel <- types.ProgressProperties{
- Event: types.ProgressEventNewArtifact,
- Artifact: artifact,
- }
- return &progressReader{
- source: source,
- channel: channel,
- interval: interval,
- artifact: artifact,
- lastUpdate: time.Now(),
- offset: 0,
- offsetUpdate: 0,
- }
-}
-
-// reportDone indicates to the internal channel that the progress has been
-// finished
-func (r *progressReader) reportDone() {
- r.channel <- types.ProgressProperties{
- Event: types.ProgressEventDone,
- Artifact: r.artifact,
- Offset: r.offset,
- OffsetUpdate: r.offsetUpdate,
- }
-}
-
-// Read continuously reads bytes into the progress reader and reports the
-// status via the internal channel
-func (r *progressReader) Read(p []byte) (int, error) {
- n, err := r.source.Read(p)
- r.offset += uint64(n)
- r.offsetUpdate += uint64(n)
-
- // Fire the progress reader in the provided interval
- if time.Since(r.lastUpdate) > r.interval {
- r.channel <- types.ProgressProperties{
- Event: types.ProgressEventRead,
- Artifact: r.artifact,
- Offset: r.offset,
- OffsetUpdate: r.offsetUpdate,
- }
- r.lastUpdate = time.Now()
- r.offsetUpdate = 0
- }
- return n, err
-}
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index 8f46e9de6..7ddfe917b 100644
--- a/vendor/github.com/containers/image/v5/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -1,31 +1,115 @@
package copy
import (
- "github.com/containers/image/v5/signature"
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ internalsig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature/sigstore"
+ "github.com/containers/image/v5/signature/simplesigning"
"github.com/containers/image/v5/transports"
- "github.com/pkg/errors"
)
-// createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
- mech, err := signature.NewGPGSigningMechanism()
- if err != nil {
- return nil, errors.Wrap(err, "Error initializing GPG")
+// setupSigners initializes c.signers.
+func (c *copier) setupSigners() error {
+ c.signers = append(c.signers, c.options.Signers...)
+ // c.signersToClose is intentionally not updated with c.options.Signers.
+
+ // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need
+ // to clean up any created signers on failure.
+
+ if c.options.SignBy != "" {
+ opts := []simplesigning.Option{
+ simplesigning.WithKeyFingerprint(c.options.SignBy),
+ }
+ if c.options.SignPassphrase != "" {
+ opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase))
+ }
+ signer, err := simplesigning.NewSigner(opts...)
+ if err != nil {
+ return err
+ }
+ c.signers = append(c.signers, signer)
+ c.signersToClose = append(c.signersToClose, signer)
+ }
+
+ if c.options.SignBySigstorePrivateKeyFile != "" {
+ signer, err := sigstore.NewSigner(
+ sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase),
+ )
+ if err != nil {
+ return err
+ }
+ c.signers = append(c.signers, signer)
+ c.signersToClose = append(c.signersToClose, signer)
+ }
+
+ return nil
+}
+
+// sourceSignatures returns signatures from unparsedSource,
+// and verifies that they can be used (to avoid copying a large image when we
+// can tell in advance that it would ultimately fail)
+func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage,
+ gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
+ var sigs []internalsig.Signature
+ if c.options.RemoveSignatures {
+ sigs = []internalsig.Signature{}
+ } else {
+ c.Printf("%s\n", gettingSignaturesMessage)
+ s, err := unparsed.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading signatures: %w", err)
+ }
+ sigs = s
}
- defer mech.Close()
- if err := mech.SupportsSigning(); err != nil {
- return nil, errors.Wrap(err, "Signing not supported")
+ if len(sigs) != 0 {
+ c.Printf("%s\n", checkingDestMessage)
+ if err := c.dest.SupportsSignatures(ctx); err != nil {
+ return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
+ }
+ }
+ return sigs, nil
+}
+
+// createSignatures creates signatures for manifest and an optional identity.
+func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) {
+ if len(c.signers) == 0 {
+ // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible.
+ return nil, nil
}
- dockerReference := c.dest.Reference().DockerReference()
- if dockerReference == nil {
- return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ if identity != nil {
+ if reference.IsNameOnly(identity) {
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
+ }
+ } else {
+ identity = c.dest.Reference().DockerReference()
+ if identity == nil {
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ }
}
- c.Printf("Signing manifest\n")
- newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
- if err != nil {
- return nil, errors.Wrap(err, "Error creating signature")
+ res := make([]internalsig.Signature, 0, len(c.signers))
+ for signerIndex, signer := range c.signers {
+ msg := internalSigner.ProgressMessage(signer)
+ if len(c.signers) == 1 {
+ c.Printf("Creating signature: %s\n", msg)
+ } else {
+ c.Printf("Creating signature %d: %s\n", signerIndex+1, msg)
+ }
+ newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity)
+ if err != nil {
+ if len(c.signers) == 1 {
+ return nil, fmt.Errorf("creating signature: %w", err)
+ } else {
+ return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err)
+ }
+ }
+ res = append(res, newSig)
}
- return newSig, nil
+ return res, nil
}
diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go
new file mode 100644
index 000000000..e008c7e86
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/single.go
@@ -0,0 +1,997 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "reflect"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/pkg/platform"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ chunkedToc "github.com/containers/storage/pkg/chunked/toc"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/vbauerster/mpb/v8"
+)
+
+// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
+type imageCopier struct {
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src *image.SourcedImage
+ manifestConversionPlan manifestConversionPlan
+ diffIDsAreNeeded bool
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+ canSubstituteBlobs bool
+ compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+ compressionLevel *int
+ requireCompressionFormatMatch bool
+}
+
+type copySingleImageOptions struct {
+ requireCompressionFormatMatch bool
+ compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+ compressionLevel *int
+}
+
+// copySingleImageResult carries data produced by copySingleImage
+type copySingleImageResult struct {
+ manifest []byte
+ manifestMIMEType string
+ manifestDigest digest.Digest
+ compressionAlgorithms []compressiontypes.Algorithm
+}
+
+// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate
+// source image admissibility.
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) {
+ // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+ // Make sure we fail cleanly in such cases.
+ multiImage, err := isMultiImage(ctx, unparsedImage)
+ if err != nil {
+ // FIXME FIXME: How to name a reference for the sub-image?
+ return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
+ }
+ if multiImage {
+ return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+ }
+
+ // Please keep this policy check BEFORE reading any other information about the image.
+ // (The multiImage check above only matches the MIME type, which we have received anyway.
+ // Actual parsing of anything should be deferred.)
+ if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+ return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err)
+ }
+ src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage)
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
+ }
+
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
+ // one item from a manifest list that matches it, accept that as a match.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ manifestList, _, err := c.unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err)
+ }
+ matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference")
+ }
+ }
+ }
+ }
+
+ if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ sigs, err := c.sourceSignatures(ctx, src,
+ "Getting image source signatures",
+ "Checking if image destination supports signatures")
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ // Determine if we're allowed to modify the manifest.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copyMultipleImages.
+ cannotModifyManifestReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestReason = "Destination specifies a digest"
+ }
+ if c.options.PreserveDigests {
+ cannotModifyManifestReason = "Instructed to preserve digests"
+ }
+
+ ic := imageCopier{
+ c: c,
+ manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
+ src: src,
+ // manifestConversionPlan and diffIDsAreNeeded are computed later
+ cannotModifyManifestReason: cannotModifyManifestReason,
+ requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
+ }
+ if opts.compressionFormat != nil {
+ ic.compressionFormat = opts.compressionFormat
+ ic.compressionLevel = opts.compressionLevel
+ } else if c.options.DestinationCtx != nil {
+ // Note that compressionFormat and compressionLevel can be nil.
+ ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
+ ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
+ }
+ // HACK: Don’t combine zstd:chunked and encryption.
+ // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption
+ // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have.
+ // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without
+ // encryption. (That can be determined from the unencrypted config anyway, but, still...)
+ //
+ // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of
+ // hard-coding zstd:chunked / zstd.
+ if ic.c.options.OciEncryptLayers != nil {
+ format := ic.compressionFormat
+ if format == nil {
+ format = defaultCompressionFormat
+ }
+ if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
+ if ic.requireCompressionFormatMatch {
+ return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
+ }
+ logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead")
+ ic.compressionFormat = &compression.Zstd
+ }
+ }
+
+ // Decide whether we can substitute blobs with semantic equivalents:
+ // - Don’t do that if we can’t modify the manifest at all
+ // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
+
+ if err := ic.updateEmbeddedDockerReference(); err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
+
+ ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: ic.src.ManifestMIMEType,
+ destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
+ forceManifestMIMEType: c.options.ForceManifestMIMEType,
+ requestedCompressionFormat: ic.compressionFormat,
+ requiresOCIEncryption: destRequiresOciEncryption,
+ cannotModifyManifestReason: ic.cannotModifyManifestReason,
+ })
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+ // We set up this part of ic.manifestUpdates quite early, not just around the
+ // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
+ // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
+ // on the expected destination format.
+ if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion {
+ ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType
+ }
+
+ // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
+
+ // If enabled, fetch and compare the destination's manifest; as an optimization, skip updating the destination iff they are equal.
+ if c.options.OptimizeDestinationImageAlreadyExists {
+ shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+ noPendingManifestUpdates := ic.noPendingManifestUpdates()
+
+ logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
+ if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
+ matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
+ if err != nil {
+ logrus.Warnf("Failed to compare destination image manifest: %v", err)
+ return copySingleImageResult{}, err
+ }
+
+ if matchedResult != nil {
+ c.Printf("Skipping: image already present at destination\n")
+ return *matchedResult, nil
+ }
+ }
+ }
+
+ compressionAlgos, err := ic.copyLayers(ctx)
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+ // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+ // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+ // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
+ // we're altering how they're compressed. If the process succeeds, fine…
+ manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ wipResult := copySingleImageResult{
+ manifest: manifestBytes,
+ manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType,
+ manifestDigest: manifestDigest,
+ }
+ if err != nil {
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err)
+ // … if it fails, and the failure is either because the manifest is rejected by the registry, or
+ // because we failed to create a manifest of the specified type because the specific manifest type
+ // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+ // have other options available that could still succeed.
+ var manifestTypeRejectedError types.ManifestTypeRejectedError
+ var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+ isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+ isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
+ if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+ // We don’t have other options.
+ // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+ // Don’t bother the user with MIME types if we have no choice.
+ return copySingleImageResult{}, err
+ }
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType.
+ // So if we are here, we will definitely be trying to convert the manifest.
+ // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
+ // so let’s bail out early and with a better error message.
+ if ic.cannotModifyManifestReason != "" {
+ return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
+ }
+
+ // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
+ errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)}
+ for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates {
+ logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
+ ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
+ attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
+ continue
+ }
+
+ // We have successfully uploaded a manifest.
+ wipResult = copySingleImageResult{
+ manifest: attemptedManifest,
+ manifestMIMEType: manifestMIMEType,
+ manifestDigest: attemptedManifestDigest,
+ }
+ errs = nil // Mark this as a success so that we don't abort below.
+ break
+ }
+ if errs != nil {
+ return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+ }
+ if targetInstance != nil {
+ targetInstance = &wipResult.manifestDigest
+ }
+
+ newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+ sigs = append(slices.Clone(sigs), newSigs...)
+
+ if len(sigs) > 0 {
+ c.Printf("Storing signatures\n")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
+ return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err)
+ }
+ }
+ wipResult.compressionAlgorithms = compressionAlgos
+ res := wipResult // We are done
+ return res, nil
+}
+
+// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
+func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+ if dest.MustMatchRuntimeOS() {
+ c, err := src.OCIConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("parsing image configuration: %w", err)
+ }
+ wantedPlatforms := platform.WantedPlatforms(sys)
+
+ options := newOrderedSet()
+ match := false
+ for _, wantedPlatform := range wantedPlatforms {
+ // For a transitional period, this might trigger warnings because the Variant
+ // field was added to OCI config only recently. If this turns out to be too noisy,
+ // revert this check to only look for (OS, Architecture).
+ if platform.MatchesPlatform(c.Platform, wantedPlatform) {
+ match = true
+ break
+ }
+ options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
+ }
+ if !match {
+ logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
+ c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
+ }
+ }
+ return nil
+}
+
+// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
+func (ic *imageCopier) updateEmbeddedDockerReference() error {
+ if ic.c.dest.IgnoresEmbeddedDockerReference() {
+ return nil // Destination would prefer us not to update the embedded reference.
+ }
+ destRef := ic.c.dest.Reference().DockerReference()
+ if destRef == nil {
+ return nil // Destination does not care about Docker references
+ }
+ if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
+ return nil // No reference embedded in the manifest, or it matches destRef already.
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+ transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
+ }
+ ic.manifestUpdates.EmbeddedDockerReference = destRef
+ return nil
+}
+
+func (ic *imageCopier) noPendingManifestUpdates() bool {
+ return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+}
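+
+// A minimal illustrative sketch (not part of the original patch) of the
+// DeepEqual idiom above: an options value counts as “no pending updates”
+// exactly when every field other than InformationOnly is zero:
+//
+//    u := types.ManifestUpdateOptions{}
+//    reflect.DeepEqual(u, types.ManifestUpdateOptions{InformationOnly: u.InformationOnly}) // true: nothing pending
+//    u.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+//    reflect.DeepEqual(u, types.ManifestUpdateOptions{InformationOnly: u.InformationOnly}) // false: a conversion is pending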
+
+// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
+// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise.
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
+ srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
+ if err != nil {
+ logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
+ return nil, nil
+ }
+ defer destImageSource.Close()
+
+ destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+ return nil, nil
+ }
+
+ destManifestDigest, err := manifest.Digest(destManifest)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+ if srcManifestDigest != destManifestDigest {
+ return nil, nil
+ }
+
+ compressionAlgos := set.New[string]()
+ for _, srcInfo := range ic.src.LayerInfos() {
+ _, c, err := compressionEditsFromBlobInfo(srcInfo)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ compressionAlgos.Add(c.Name())
+ }
+ }
+
+ algos, err := algorithmsByNames(compressionAlgos.Values())
+ if err != nil {
+ return nil, err
+ }
+
+ // Destination and source manifests, types and digests should all be equivalent
+ return &copySingleImageResult{
+ manifest: destManifest,
+ manifestMIMEType: destManifestType,
+ manifestDigest: srcManifestDigest,
+ compressionAlgorithms: algos,
+ }, nil
+}
+
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
+func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
+ srcInfos := ic.src.LayerInfos()
+ updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, err
+ }
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
+
+ type copyLayerData struct {
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ }
+
+ // The manifest is used to determine whether a given layer is empty.
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ manifestLayerInfos := man.LayerInfos()
+
+ // copyGroup is used to determine if all layers are copied
+ copyGroup := sync.WaitGroup{}
+
+ data := make([]copyLayerData, len(srcInfos))
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+ defer copyGroup.Done()
+ cld := copyLayerData{}
+ if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ // DiffIDs are, currently, needed only when converting from schema1,
+ // in which case src.LayerInfos will not have URLs because schema1
+ // does not support them.
+ if ic.diffIDsAreNeeded {
+ cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
+ } else {
+ cld.destInfo = srcLayer
+ logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+ }
+ } else {
+ cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
+ }
+ data[index] = cld
+ }
+
+ // Decide which layers to encrypt
+ layersToEncrypt := set.New[int]()
+ if ic.c.options.OciEncryptLayers != nil {
+ totalLayers := len(srcInfos)
+ for _, l := range *ic.c.options.OciEncryptLayers {
+ switch {
+ case l >= 0 && l < totalLayers:
+ layersToEncrypt.Add(l)
+ case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers
+ layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed.
+ default:
+ return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers)
+ }
+ }
+
+ if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers”
+ for i := 0; i < len(srcInfos); i++ {
+ layersToEncrypt.Add(i)
+ }
+ }
+ }
+
+ if err := func() error { // A scope for defer
+ progressPool := ic.c.newProgressPool()
+ defer progressPool.Wait()
+
+ // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
+ defer copyGroup.Wait()
+
+ for i, srcLayer := range srcInfos {
+ if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying layer: %w", err)
+ }
+ copyGroup.Add(1)
+ go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
+ }
+
+ // A call to copyGroup.Wait() is done at this point by the defer above.
+ return nil
+ }(); err != nil {
+ return nil, err
+ }
+
+ compressionAlgos := set.New[string]()
+ destInfos := make([]types.BlobInfo, len(srcInfos))
+ diffIDs := make([]digest.Digest, len(srcInfos))
+ for i, cld := range data {
+ if cld.err != nil {
+ return nil, cld.err
+ }
+ if cld.destInfo.CompressionAlgorithm != nil {
+ compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name())
+ }
+ destInfos[i] = cld.destInfo
+ diffIDs[i] = cld.diffID
+ }
+
+ // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
+ // OptimizeDestinationImageAlreadyExists short-circuit conditions
+ ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+ if ic.diffIDsAreNeeded {
+ ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+ }
+ if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+ ic.manifestUpdates.LayerInfos = destInfos
+ }
+ algos, err := algorithmsByNames(compressionAlgos.Values())
+ if err != nil {
+ return nil, err
+ }
+ return algos, nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possibly other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+ return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
+ return a.Digest == b.Digest
+ })
+}
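+
+// Hedged example (not from the original patch): slices.EqualFunc also reports
+// a length mismatch as inequality, so an added or removed layer counts as a
+// difference even when all shared digests match:
+//
+//    a := []types.BlobInfo{{Digest: "sha256:1111", Size: 10}}
+//    b := []types.BlobInfo{{Digest: "sha256:1111", Size: 99}, {Digest: "sha256:2222"}}
+//    layerDigestsDiffer(a, b) // true: lengths differ
+//    layerDigestsDiffer(a, b[:1]) // false: same digest, sizes are ignored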
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest
+// and its digest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
+ var pendingImage types.Image = ic.src
+ if !ic.noPendingManifestUpdates() {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
+ }
+ if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+ // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+ // So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+ // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+ // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+ // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+ // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+ return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ }
+ pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
+ if err != nil {
+ return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
+ }
+ pendingImage = pi
+ }
+ man, _, err := pendingImage.Manifest(ctx)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest: %w", err)
+ }
+
+ if err := ic.copyConfig(ctx, pendingImage); err != nil {
+ return nil, "", err
+ }
+
+ ic.c.Printf("Writing manifest to image destination\n")
+ manifestDigest, err := manifest.Digest(man)
+ if err != nil {
+ return nil, "", err
+ }
+ if instanceDigest != nil {
+ instanceDigest = &manifestDigest
+ }
+ if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
+ logrus.Debugf("Error %v while writing manifest %q", err, string(man))
+ return nil, "", fmt.Errorf("writing manifest: %w", err)
+ }
+ return man, manifestDigest, nil
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
+ srcInfo := src.ConfigInfo()
+ if srcInfo.Digest != "" {
+ if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying config: %w", err)
+ }
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+
+ destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+ progressPool := ic.c.newProgressPool()
+ defer progressPool.Wait()
+ bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer bar.Abort(false)
+ ic.c.printCopyInfo("config", srcInfo)
+
+ configBlob, err := src.ConfigBlob(ctx)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
+ }
+
+ destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ bar.mark100PercentComplete()
+ return destInfo, nil
+ }()
+ if err != nil {
+ return err
+ }
+ if destInfo.Digest != srcInfo.Digest {
+ return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+ }
+ }
+ return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+ digest digest.Digest
+ err error
+}
+
+// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo.
+func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
+ // This MIME type → compression mapping belongs in manifest-specific code in our manifest
+ // package (but we should preferably replace/change UpdatedImage instead of productizing
+ // this workaround).
+ switch srcInfo.MediaType {
+ case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+ return types.PreserveOriginal, &compression.Gzip, nil
+ case imgspecv1.MediaTypeImageLayerZstd:
+ tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+ if err != nil {
+ return types.PreserveOriginal, nil, err
+ }
+ if tocDigest != nil {
+ return types.PreserveOriginal, &compression.ZstdChunked, nil
+ }
+ return types.PreserveOriginal, &compression.Zstd, nil
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
+ return types.Decompress, nil, nil
+ default:
+ return types.PreserveOriginal, nil, nil
+ }
+}
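+
+// Hedged usage sketch (not part of the original patch): a plain gzip layer
+// keeps its representation and reports gzip as the algorithm, while an
+// uncompressed OCI layer is marked for decompression so that the MediaType
+// can later be recomputed:
+//
+//    op, algo, _ := compressionEditsFromBlobInfo(types.BlobInfo{MediaType: imgspecv1.MediaTypeImageLayerGzip})
+//    // op == types.PreserveOriginal, algo.Name() == "gzip"
+//    op, algo, _ = compressionEditsFromBlobInfo(types.BlobInfo{MediaType: imgspecv1.MediaTypeImageLayer})
+//    // op == types.Decompress, algo == nil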
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef may be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+ // If the srcInfo doesn't contain compression information, try to compute it from the
+ // MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+ // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+ // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+ // which uses the compression information to compute the updated MediaType values.
+ // (Sadly UpdatedImage() is documented to not update MediaTypes from
+ // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+ if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
+ op, algo, err := compressionEditsFromBlobInfo(srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+ srcInfo.CompressionOperation = op
+ srcInfo.CompressionAlgorithm = algo
+ }
+
+ ic.c.printCopyInfo("blob", srcInfo)
+
+ diffIDIsNeeded := false
+ var cachedDiffID digest.Digest = ""
+ if ic.diffIDsAreNeeded {
+ cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded = cachedDiffID == ""
+ }
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
+
+ var requiredCompression *compressiontypes.Algorithm
+ if ic.requireCompressionFormatMatch {
+ requiredCompression = ic.compressionFormat
+ }
+
+ var tocDigest digest.Digest
+
+ // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
+ d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+ if d != nil {
+ tocDigest = *d
+ }
+
+ reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ CanSubstitute: canSubstitute,
+ EmptyLayer: emptyLayer,
+ LayerIndex: &layerIndex,
+ SrcRef: srcRef,
+ PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...),
+ RequiredCompression: requiredCompression,
+ OriginalCompression: srcInfo.CompressionAlgorithm,
+ TOCDigest: tocDigest,
+ })
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
+ }
+ if reused {
+ logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+ if err := func() error { // A scope for defer
+ label := "skipped: already exists"
+ if reusedBlob.MatchedByTOCDigest {
+ label = "skipped: already exists (found by TOC)"
+ }
+ bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
+ if err != nil {
+ return err
+ }
+ defer bar.Abort(false)
+ bar.mark100PercentComplete()
+ return nil
+ }(); err != nil {
+ return types.BlobInfo{}, "", err
+ }
+
+ // Throw an event that the layer has been skipped
+ if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+ ic.c.options.Progress <- types.ProgressProperties{
+ Event: types.ProgressEventSkipped,
+ Artifact: srcInfo,
+ }
+ }
+
+ return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
+ }
+ }
+
+ // A partial pull is managed by the destination storage, which decides what portions
+ // of the source file are not yet known and must be fetched.
+ // Attempt a partial pull only when the source allows retrieving a blob partially and
+ // the destination has support for it.
+ if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
+ reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
+ bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ hideProgressBar := true
+ defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+ bar.Abort(hideProgressBar)
+ }()
+
+ proxy := blobChunkAccessorProxy{
+ wrapped: ic.c.rawSource,
+ bar: bar,
+ }
+ uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{
+ Cache: ic.c.blobInfoCache,
+ LayerIndex: layerIndex,
+ })
+ if err == nil {
+ if srcInfo.Size != -1 {
+ refill := srcInfo.Size - bar.Current()
+ bar.SetCurrent(srcInfo.Size)
+ bar.SetRefill(refill)
+ }
+ bar.mark100PercentComplete()
+ hideProgressBar = false
+ logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+ return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
+ }
+ // On a "partial content not available" error, ignore it and retrieve the whole layer.
+ var perr private.ErrFallbackToOrdinaryLayerDownload
+ if errors.As(err, &perr) {
+ logrus.Debugf("Failed to retrieve partial blob: %v", err)
+ return false, types.BlobInfo{}, nil
+ }
+ return false, types.BlobInfo{}, err
+ }()
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err)
+ }
+ if reused {
+ return blobInfo, cachedDiffID, nil
+ }
+ }
+
+ // Fallback: copy the layer, computing the diffID if we need to do so
+ return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
+ bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+ defer bar.Abort(false)
+
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ defer srcStream.Close()
+
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+
+ diffID := cachedDiffID
+ if diffIDIsNeeded {
+ select {
+ case <-ctx.Done():
+ return types.BlobInfo{}, "", ctx.Err()
+ case diffIDResult := <-diffIDChan:
+ if diffIDResult.err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
+ }
+ logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptingOrDecrypting {
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ }
+ diffID = diffIDResult.digest
+ }
+ }
+
+ bar.mark100PercentComplete()
+ return blobInfo, diffID, nil
+ }()
+}
+
+// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
+func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
+ // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
+ // compression format if the blob was substituted.
+ // Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+ // of the generic code in this package.
+ res := types.BlobInfo{
+ Digest: reusedBlob.Digest,
+ Size: reusedBlob.Size,
+ URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+ // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t
+ // (but those annotations being left with incorrect values should not break pulls).
+ Annotations: maps.Clone(inputInfo.Annotations),
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+ CompressionOperation: reusedBlob.CompressionOperation,
+ CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
+ }
+ // The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+ // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
+ // on what we know from the srcInfos we were given.
+ if reusedBlob.Digest == inputInfo.Digest {
+ if res.CompressionOperation == types.PreserveOriginal {
+ res.CompressionOperation = inputInfo.CompressionOperation
+ }
+ if res.CompressionAlgorithm == nil {
+ res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ }
+ }
+ if len(reusedBlob.CompressionAnnotations) != 0 {
+ if res.Annotations == nil {
+ res.Annotations = map[string]string{}
+ }
+ maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
+ }
+ return res
+}
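+
+// Hedged example (assumption, not from the original patch): when the exact
+// same blob is reused and the transport reports nothing about compression,
+// the input’s compression fields are carried over; for some digest d:
+//
+//    in := types.BlobInfo{Digest: d, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip}
+//    out := updatedBlobInfoFromReuse(in, private.ReusedBlob{Digest: d, Size: 123})
+//    // out.CompressionOperation == types.Compress, out.CompressionAlgorithm == &compression.Gzip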
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+ var diffIDChan chan diffIDResult
+
+ err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow
+ if diffIDIsNeeded {
+ diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeReader, pipeWriter := io.Pipe()
+ defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+ // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+ // reading from the pipe has failed, we don’t really care.
+ // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+ // the return value includes an error indication, which we do check.
+ //
+ // If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+ // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+ return pipeWriter
+ }
+ }
+
+ blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+ return blobInfo, diffIDChan, err
+ // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
+ result := diffIDResult{
+ digest: "",
+ err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+ }
+ defer func() { dest <- result }()
+ defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+ result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
+ if decompressor != nil {
+ s, err := decompressor(stream)
+ if err != nil {
+ return "", err
+ }
+ defer s.Close()
+ stream = s
+ }
+
+ return digest.Canonical.FromReader(stream)
+}
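+
+// Usage sketch (an assumption, not part of the original patch), for a
+// gzip-compressed layer read from disk; compression.GzipDecompressor matches
+// the compressiontypes.DecompressorFunc signature expected above:
+//
+//    f, _ := os.Open("layer.tar.gz") // hypothetical path; handle the error in real code
+//    defer f.Close()
+//    diffID, err := computeDiffID(f, compression.GzipDecompressor)
+//    // diffID is the digest of the uncompressed tar stream, not of the .gz file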
+
+// algorithmsByNames returns a slice of Algorithms from a slice of algorithm names
+func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
+ result := []compressiontypes.Algorithm{}
+ for _, name := range names {
+ algo, err := compression.AlgorithmByName(name)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, algo)
+ }
+ return result, nil
+}
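+
+// Hedged usage sketch (not part of the original patch): the names collected
+// into compressionAlgos above round-trip through AlgorithmByName; unknown
+// names surface as an error rather than being silently dropped:
+//
+//    algos, err := algorithmsByNames([]string{"gzip", "zstd"})
+//    // err == nil, len(algos) == 2
+//    _, err = algorithmsByNames([]string{"not-a-compressor"})
+//    // err != nil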
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 5cafd2674..000c8987d 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -2,15 +2,21 @@ package directory
import (
"context"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -21,34 +27,50 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
- ref dirReference
- compress bool
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
+ ref dirReference
}
// newImageDestination returns an ImageDestination for writing to a directory.
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
- d := &dirImageDestination{ref: ref, compress: compress}
+func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) {
+ desiredLayerCompression := types.PreserveOriginal
+ if sys != nil {
+ if sys.DirForceCompress {
+ desiredLayerCompression = types.Compress
+
+ if sys.DirForceDecompress {
+ return nil, fmt.Errorf("Cannot compress and decompress at the same time")
+ }
+ }
+ if sys.DirForceDecompress {
+ desiredLayerCompression = types.Decompress
+ }
+ }
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
// if the contents don't match throw an error
- dirExists, err := pathExists(d.ref.resolvedPath)
+ dirExists, err := pathExists(ref.resolvedPath)
if err != nil {
- return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+ return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
}
if dirExists {
- isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+ isEmpty, err := isDirEmpty(ref.resolvedPath)
if err != nil {
return nil, err
}
if !isEmpty {
- versionExists, err := pathExists(d.ref.versionPath())
+ versionExists, err := pathExists(ref.versionPath())
if err != nil {
- return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+ return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
}
if versionExists {
- contents, err := ioutil.ReadFile(d.ref.versionPath())
+ contents, err := os.ReadFile(ref.versionPath())
if err != nil {
return nil, err
}
@@ -60,22 +82,37 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
return nil, ErrNotContainerImageDir
}
// delete directory contents so that only one image is in the directory at a time
- if err = removeDirContents(d.ref.resolvedPath); err != nil {
- return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+ if err = removeDirContents(ref.resolvedPath); err != nil {
+ return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
}
- logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+ logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
}
} else {
// create directory if it doesn't exist
- if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+ if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
+ return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
}
}
// create version file
- err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+ err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
if err != nil {
- return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+ return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
}
+
+ d := &dirImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: nil,
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ }
+ d.Compat = impl.AddCompat(d)
return d, nil
}
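+
+// Hedged caller-side sketch (not part of the original patch): layer handling
+// is selected through SystemContext, and the two force flags are mutually
+// exclusive per the check above (ctx and ref are assumed to be in scope):
+//
+//    dest, err := ref.NewImageDestination(ctx, &types.SystemContext{DirForceCompress: true})
+//    // err == nil; the destination requests compressed layers
+//    _, err = ref.NewImageDestination(ctx, &types.SystemContext{DirForceCompress: true, DirForceDecompress: true})
+//    // err != nil: compressing and decompressing at once is rejected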
@@ -90,57 +127,17 @@ func (d *dirImageDestination) Close() error {
return nil
}
-func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
- return nil
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.compress {
- return types.Compress
- }
- return types.PreserveOriginal
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dirImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dirImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
succeeded := false
explicitClosed := false
@@ -153,20 +150,18 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -175,43 +170,48 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
}
- blobPath := d.ref.layerPath(computedDigest)
+ blobPath, err := d.ref.layerPath(blobDigest)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
succeeded = true
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ return false, private.ReusedBlob{}, nil
+ }
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
+ }
+ blobPath, err := d.ref.layerPath(info.Digest)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
}
- blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, types.BlobInfo{}, nil
+ return false, private.ReusedBlob{}, nil
}
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
- return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+ return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
@@ -223,32 +223,45 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
- return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+ path, err := d.ref.manifestPath(instanceDigest)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(path, manifest, 0644)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
for i, sig := range signatures {
- if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ path, err := d.ref.signaturePath(i, instanceDigest)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(path, blob, 0644); err != nil {
return err
}
}
return nil
}
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
return nil
}
// returns true if path exists
func pathExists(path string) (bool, error) {
- _, err := os.Stat(path)
+ err := fileutils.Exists(path)
if err == nil {
return true, nil
}
@@ -260,7 +273,7 @@ func pathExists(path string) (bool, error) {
// returns true if directory is empty
func isDirEmpty(path string) (bool, error) {
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return false, err
}
@@ -269,7 +282,7 @@ func isDirEmpty(path string) (bool, error) {
// deletes the contents of a directory
func removeDirContents(path string) error {
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go
index ad9129d40..6d725bcfa 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_src.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_src.go
@@ -2,23 +2,41 @@ package directory
import (
"context"
+ "fmt"
"io"
- "io/ioutil"
"os"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
type dirImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref dirReference
}
// newImageSource returns an ImageSource reading from an existing directory.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ref dirReference) types.ImageSource {
- return &dirImageSource{ref}
+func newImageSource(ref dirReference) private.ImageSource {
+ s := &dirImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
}
// Reference returns the reference used to set up this source, _as specified by the user_
@@ -37,23 +55,26 @@ func (s *dirImageSource) Close() error {
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
+ path, err := s.ref.manifestPath(instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+ m, err := os.ReadFile(path)
if err != nil {
return nil, "", err
}
return m, manifest.GuessMIMEType(m), err
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dirImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- r, err := os.Open(s.ref.layerPath(info.Digest))
+ path, err := s.ref.layerPath(info.Digest)
+ if err != nil {
+ return nil, -1, err
+ }
+ r, err := os.Open(path)
if err != nil {
return nil, -1, err
}
@@ -64,33 +85,29 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- signatures := [][]byte{}
+func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ signatures := []signature.Signature{}
for i := 0; ; i++ {
- signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+ path, err := s.ref.signaturePath(i, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ sigBlob, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
break
}
return nil, err
}
+ signature, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, signature)
}
return signatures, nil
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index adfec6ef3..4f7d596b4 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -2,17 +2,17 @@ package directory
import (
"context"
+ "errors"
"fmt"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
// scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
@@ -48,7 +48,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
}
return nil
}
@@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dirReference) StringWithinTransport() string {
return ref.path
@@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src := newImageSource(ref)
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -153,38 +152,43 @@ func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemCon
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- compress := false
- if sys != nil {
- compress = sys.DirForceCompress
- }
- return newImageDestination(ref, compress)
+ return newImageDestination(sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for dir: images")
+ return errors.New("Deleting images not implemented for dir: images")
}
// manifestPath returns a path for the manifest within a directory using our conventions.
-func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
if instanceDigest != nil {
- return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+ if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+ return "", err
+ }
+ return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
}
- return filepath.Join(ref.path, "manifest.json")
+ return filepath.Join(ref.path, "manifest.json"), nil
}
// layerPath returns a path for a layer tarball within a directory using our conventions.
-func (ref dirReference) layerPath(digest digest.Digest) string {
+func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
+ if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+ return "", err
+ }
// FIXME: Should we keep the digest identification?
- return filepath.Join(ref.path, digest.Encoded())
+ return filepath.Join(ref.path, digest.Encoded()), nil
}
// signaturePath returns a path for a signature within a directory using our conventions.
-func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
if instanceDigest != nil {
- return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+ if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+ return "", err
+ }
+ return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
}
- return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+ return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
}
// versionPath returns a path for the version file within a directory using our conventions.
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
index 71136b880..69c1e0727 100644
--- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -1,18 +1,19 @@
package explicitfilepath
import (
+ "fmt"
"os"
"path/filepath"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/fileutils"
)
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name)
-// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
- switch _, err := os.Lstat(path); {
+ switch err := fileutils.Lexists(path); {
case err == nil:
return resolveExistingPathToFullyExplicit(path)
case os.IsNotExist(err):
@@ -26,14 +27,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
// in the resulting path, and especially not at the end.
- return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
}
resolvedPath := filepath.Join(resolvedParent, file)
// As a sanity check, ensure that there are no "." or ".." components.
cleanedResolvedPath := filepath.Clean(resolvedPath)
if cleanedResolvedPath != resolvedPath {
// Coverage: This should never happen.
- return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
}
return resolvedPath, nil
default: // err != nil, unrecognized
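
Aside on the fileutils.Lexists swap above: the containers/storage helper is effectively an os.Lstat whose FileInfo is discarded, so the switch only ever consults the error. A hedged sketch of the equivalent stdlib shape:

```go
package main

import (
	"fmt"
	"os"
)

// lexists mirrors what the switch above relies on: only the error matters,
// and Lstat (unlike Stat) does not follow a final symlink.
func lexists(path string) error {
	_, err := os.Lstat(path)
	return err
}

func main() {
	switch err := lexists("/no/such/path"); {
	case err == nil:
		fmt.Println("exists")
	case os.IsNotExist(err):
		fmt.Println("missing") // the special case ResolvePathToFullyExplicit allows
	default:
		fmt.Println("unexpected:", err)
	}
}
```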
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index e874e02e0..9e0d32007 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -2,54 +2,49 @@ package archive
import (
"context"
- "io"
+ "fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
- archive *tarfile.Writer // Should only be closed if writer != nil
- writer io.Closer // May be nil if the archive is shared
+ writer *Writer // Should be closed if closeWriter
+ closeWriter bool
}
-func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
if ref.sourceIndex != -1 {
- return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+ return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
- var archive *tarfile.Writer
- var writer io.Closer
- if ref.archiveWriter != nil {
- archive = ref.archiveWriter
- writer = nil
+ var writer *Writer
+ var closeWriter bool
+ if ref.writer != nil {
+ writer = ref.writer
+ closeWriter = false
} else {
- fh, err := openArchiveForWriting(ref.path)
+ w, err := NewWriter(sys, ref.path)
if err != nil {
return nil, err
}
-
- archive = tarfile.NewWriter(fh)
- writer = fh
+ writer = w
+ closeWriter = true
+ }
+ d := &archiveImageDestination{
+ ref: ref,
+ writer: writer,
+ closeWriter: closeWriter,
}
- tarDest := tarfile.NewDestination(sys, archive, ref.ref)
+ tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref, d.CommitWithOptions)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
- return &archiveImageDestination{
- Destination: tarDest,
- ref: ref,
- archive: archive,
- writer: writer,
- }, nil
-}
-
-// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
-func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Decompress
+ d.Destination = tarDest
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -60,19 +55,26 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
- if d.writer != nil {
+ if d.closeWriter {
return d.writer.Close()
}
return nil
}
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if d.writer != nil {
- return d.archive.Close()
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ d.writer.imageCommitted()
+ if d.closeWriter {
+ // We could do this only in .Close(), but failures in .Close() are much more likely to be
+ // ignored by callers that use defer. So, in single-image destinations, try to complete
+ // the archive here.
+ // But if Commit() is never called, let .Close() clean up.
+ err := d.writer.Close()
+ d.closeWriter = false
+ return err
}
return nil
}
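
The comment added in CommitWithOptions above describes a reusable pattern: deferred Close() errors are routinely ignored, so a single-image destination finishes its output in the commit path, where the caller does check the error, and disarms the later Close(). A generic, hedged sketch (the names are illustrative, not from the package):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

type committer struct {
	closer      io.Closer
	closeOnExit bool // we own closer until a successful commit
}

// Commit completes the output where the caller checks errors; afterwards
// the deferred Close() below degrades to a no-op.
func (c *committer) Commit() error {
	if c.closeOnExit {
		c.closeOnExit = false
		return c.closer.Close()
	}
	return nil
}

// Close cleans up only if Commit was never reached.
func (c *committer) Close() error {
	if c.closeOnExit {
		return c.closer.Close()
	}
	return nil
}

func main() {
	c := &committer{closer: io.NopCloser(strings.NewReader("")), closeOnExit: true}
	defer c.Close()
	fmt.Println(c.Commit()) // <nil>; Close() is now a no-op
}
```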
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
index c7bb311bc..70c4fbc71 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -1,11 +1,12 @@
package archive
import (
+ "fmt"
+
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing
@@ -40,10 +41,10 @@ func (r *Reader) Close() error {
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
standalone, ok := ref.(archiveReference)
if !ok {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
if standalone.archiveReader != nil {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
}
reader, err := NewReader(sys, standalone.path)
if err != nil {
@@ -73,22 +74,22 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+ return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
+ return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
}
refs = append(refs, ref)
}
@@ -107,7 +108,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
archiveRef, ok := ref.(archiveReference)
if !ok {
- return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
if err != nil {
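
The reader.go changes are part of the tree-wide migration off github.com/pkg/errors visible throughout this patch: errors.Wrapf(err, "...") becomes fmt.Errorf("...: %w", err), which keeps the cause on the unwrap chain for errors.Is/errors.As. A hedged before/after sketch:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Before: errors.Wrapf(err, "Error opening config %q", path)
		// After: stdlib wrapping; %w keeps err reachable via errors.Is/As.
		return fmt.Errorf("opening config %q: %w", path, err)
	}
	return f.Close()
}

func main() {
	err := openConfig("/no/such/config.json")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the cause survives
}
```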
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index 7acca210e..c4ab9a8c1 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -1,9 +1,8 @@
package archive
import (
- "context"
-
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
@@ -14,7 +13,7 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {
@@ -28,7 +27,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveRe
archive = a
closeArchive = true
}
- src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
+ src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 9a48cb46c..39e92cac3 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -2,16 +2,16 @@ package archive
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
- ctrImage "github.com/containers/image/v5/image"
+ ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -53,40 +53,39 @@ type archiveReference struct {
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
- archiveWriter *tarfile.Writer
+ writer *Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if refString == "" {
- return nil, errors.Errorf("docker-archive reference %s isn't of the form [:]", refString)
+ return nil, fmt.Errorf("docker-archive reference %s isn't of the form [:]", refString)
}
- parts := strings.SplitN(refString, ":", 2)
- path := parts[0]
+ path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
var nt reference.NamedTagged
sourceIndex := -1
- if len(parts) == 2 {
+ if gotTagOrIndex {
// A :tag or :@index was specified.
- if len(parts[1]) > 0 && parts[1][0] == '@' {
- i, err := strconv.Atoi(parts[1][1:])
+ if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
+ i, err := strconv.Atoi(tagOrIndex[1:])
if err != nil {
- return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
+ return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
}
if i < 0 {
- return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
+ return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
- ref, err := reference.ParseNormalizedNamed(parts[1])
+ ref, err := reference.ParseNormalizedNamed(tagOrIndex)
if err != nil {
- return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged { // If ref contains a digest, TagNameOnly does not change it
- return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
+ return nil, fmt.Errorf("reference does not include a tag: %s", ref.String())
}
nt = refTagged
}
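
The same hunk trades strings.SplitN for strings.Cut (Go 1.18+), replacing the len(parts) == 2 test with an explicit boolean. A small sketch of the semantics:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// With a separator: before/after are split, found is true.
	path, tagOrIndex, found := strings.Cut("archive.tar:latest", ":")
	fmt.Println(path, tagOrIndex, found) // archive.tar latest true

	// Without one: everything lands in "before", found is false.
	path, tagOrIndex, found = strings.Cut("archive.tar", ":")
	fmt.Printf("%q %q %v\n", path, tagOrIndex, found) // "archive.tar" "" false
}
```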
@@ -108,25 +107,25 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
- archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
+ archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
- return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
if ref != nil && sourceIndex != -1 {
- return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+ return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
}
if _, isDigest := ref.(reference.Canonical); isDigest {
- return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+ return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String())
}
if sourceIndex != -1 && sourceIndex < 0 {
- return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
}
return archiveReference{
path: path,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
- archiveWriter: archiveWriter,
+ writer: writer,
}, nil
}
@@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
switch {
@@ -185,17 +184,13 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return ctrImage.FromSource(ctx, sys, src)
+ return ctrImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(ctx, sys, ref)
+ return newImageSource(sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
index afac2aaee..11f797c00 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -1,38 +1,76 @@
package archive
import (
+ "errors"
+ "fmt"
"io"
"os"
+ "sync"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
- path string // The original, user-specified path; not the maintained temporary file, if any
- archive *tarfile.Writer
- writer io.Closer
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ regularFile bool // path refers to a regular file (e.g. not a pipe)
+ archive *tarfile.Writer
+ writer io.Closer
+
+ // The following state can only be accessed with the mutex held.
+ mutex sync.Mutex
+ hadCommit bool // At least one successful commit has happened
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
- fh, err := openArchiveForWriting(path)
+ // path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ fh.Close()
+ }
+ }()
+
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("statting file %q: %w", path, err)
+ }
+ regularFile := fhStat.Mode().IsRegular()
+ if regularFile && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
archive := tarfile.NewWriter(fh)
+ succeeded = true
return &Writer{
- path: path,
- archive: archive,
- writer: fh,
+ path: path,
+ regularFile: regularFile,
+ archive: archive,
+ writer: fh,
+ hadCommit: false,
}, nil
}
+// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
+func (w *Writer) imageCommitted() {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ w.hadCommit = true
+}
+
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
@@ -41,42 +79,25 @@ func (w *Writer) Close() error {
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
+ if err == nil && w.regularFile && !w.hadCommit {
+ // Writing to the destination never had a success; delete the destination if we created it.
+ // This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
+ // left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
+ // the caller manually deleting the partial archive. So, delete it instead.
+ //
+ // Archives with at least one successfully created image are left around; they might still be valuable.
+ //
+ // Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
+ // Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
+ if err2 := os.Remove(w.path); err2 != nil {
+ err = err2
+ }
+ }
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
- return newReference(w.path, destinationRef, -1, nil, w.archive)
-}
-
-// openArchiveForWriting opens path for writing a tar archive,
-// making a few sanity checks.
-func openArchiveForWriting(path string) (*os.File, error) {
- // path can be either a pipe or a regular file
- // in the case of a pipe, we require that we can open it for write
- // in the case of a regular file, we don't want to overwrite any pre-existing file
- // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
- // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
- fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
- if err != nil {
- return nil, errors.Wrapf(err, "error opening file %q", path)
- }
- succeeded := false
- defer func() {
- if !succeeded {
- fh.Close()
- }
- }()
- fhStat, err := fh.Stat()
- if err != nil {
- return nil, errors.Wrapf(err, "error statting file %q", path)
- }
-
- if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
- return nil, errors.New("docker-archive doesn't support modifying existing images")
- }
-
- succeeded = true
- return fh, nil
+ return newReference(w.path, destinationRef, -1, nil, w)
}
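
NewWriter's inlined open logic distinguishes pipes from regular files with a Stat() on the opened handle: only regular files are required to start out empty, and per the comment the emptiness check is deliberately racy rather than reaching for O_EXCL. A hedged standalone sketch of the probe:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// openFreshArchive opens path for writing, refusing to clobber a non-empty
// regular file while still accepting pipes/FIFOs of any reported "size".
func openFreshArchive(path string) (*os.File, error) {
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o644)
	if err != nil {
		return nil, fmt.Errorf("opening file %q: %w", path, err)
	}
	st, err := fh.Stat()
	if err != nil {
		fh.Close()
		return nil, fmt.Errorf("statting file %q: %w", path, err)
	}
	if st.Mode().IsRegular() && st.Size() != 0 {
		fh.Close()
		return nil, errors.New("refusing to modify an existing archive")
	}
	return fh, nil
}

func main() {
	f, err := openFreshArchive("/tmp/demo-archive.tar")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```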
diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go
new file mode 100644
index 000000000..e69e21ef7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/body_reader.go
@@ -0,0 +1,253 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/rand/v2"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry
+ bodyReaderMinimumProgress = 1 * 1024 * 1024
+ // bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry
+ bodyReaderMSSinceLastRetry = 60 * 1_000
+)
+
+// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
+// which can transparently resume some (very limited) kinds of aborted connections.
+type bodyReader struct {
+ ctx context.Context
+ c *dockerClient
+ path string // path to pass to makeRequest to retry
+ logURL *url.URL // a string to use in error messages
+ firstConnectionTime time.Time
+
+ body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
+ lastRetryOffset int64 // -1 if N/A
+ lastRetryTime time.Time // time.Time{} if N/A
+ offset int64 // Current offset within the blob
+ lastSuccessTime time.Time // time.Time{} if N/A
+}
+
+// newBodyReader creates a bodyReader for request path in c.
+// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
+// If reading from firstBody fails, bodyReader may heuristically decide to resume.
+func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
+ logURL, err := c.resolveRequestURL(path)
+ if err != nil {
+ return nil, err
+ }
+ res := &bodyReader{
+ ctx: ctx,
+ c: c,
+ path: path,
+ logURL: logURL,
+ firstConnectionTime: time.Now(),
+
+ body: firstBody,
+ lastRetryOffset: -1,
+ lastRetryTime: time.Time{},
+ offset: 0,
+ lastSuccessTime: time.Time{},
+ }
+ return res, nil
+}
+
+// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number.
+func parseDecimalInString(s string, start int) (int64, int, error) {
+ i := start
+ for i < len(s) && s[i] >= '0' && s[i] <= '9' {
+ i++
+ }
+ if i == start {
+ return -1, -1, errors.New("missing decimal number")
+ }
+ v, err := strconv.ParseInt(s[start:i], 10, 64)
+ if err != nil {
+ return -1, -1, fmt.Errorf("parsing number: %w", err)
+ }
+ return v, i, nil
+}
+
+// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it.
+func parseExpectedChar(s string, pos int, expected byte) (int, error) {
+ if pos == len(s) || s[pos] != expected {
+ return -1, fmt.Errorf("missing expected %q", expected)
+ }
+ return pos + 1, nil
+}
+
+// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1.
+func parseContentRange(res *http.Response) (int64, int64, int64, error) {
+ hdrs := res.Header.Values("Content-Range")
+ switch len(hdrs) {
+ case 0:
+ return -1, -1, -1, errors.New("missing Content-Range: header")
+ case 1:
+ break
+ default:
+ return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs))
+ }
+ hdr := hdrs[0]
+ expectedPrefix := "bytes "
+ if !strings.HasPrefix(hdr, expectedPrefix) {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix)
+ }
+ first, pos, err := parseDecimalInString(hdr, len(expectedPrefix))
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err)
+ }
+ pos, err = parseExpectedChar(hdr, pos, '-')
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
+ }
+ last, pos, err := parseDecimalInString(hdr, pos)
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err)
+ }
+ pos, err = parseExpectedChar(hdr, pos, '/')
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
+ }
+ completeLength := int64(-1)
+ if pos < len(hdr) && hdr[pos] == '*' {
+ pos++
+ } else {
+ completeLength, pos, err = parseDecimalInString(hdr, pos)
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err)
+ }
+ }
+ if pos < len(hdr) {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr)
+ }
+ return first, last, completeLength, nil
+}
+
+// Read implements io.ReadCloser
+func (br *bodyReader) Read(p []byte) (int, error) {
+ if br.body == nil {
+ return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted())
+ }
+ n, err := br.body.Read(p)
+ br.offset += int64(n)
+ switch {
+ case err == nil || err == io.EOF:
+ br.lastSuccessTime = time.Now()
+ return n, err // Unlike the default: case, don’t log anything.
+
+ case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET):
+ originalErr := err
+ redactedURL := br.logURL.Redacted()
+ if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil {
+ return n, err
+ }
+
+ if err := br.body.Close(); err != nil {
+ logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
+ }
+ br.body = nil
+ time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
+
+ headers := map[string][]string{
+ "Range": {fmt.Sprintf("bytes=%d-", br.offset)},
+ }
+ res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err)
+ }
+ consumedBody := false
+ defer func() {
+ if !consumedBody {
+ res.Body.Close()
+ }
+ }()
+ switch res.StatusCode {
+ case http.StatusPartialContent: // OK
+ // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed.
+ // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation.
+ first, last, completeLength, err := parseContentRange(res)
+ if err != nil {
+ return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err)
+ }
+ // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob.
+ if first != br.offset || (completeLength != -1 && last+1 != completeLength) {
+ return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength)
+ }
+ // Continue below
+ case http.StatusOK:
+ return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK)
+ default:
+ err := registryHTTPResponseToError(res)
+ return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
+ }
+
+ logrus.Debugf("Successfully reconnected to %s", redactedURL)
+ consumedBody = true
+ br.body = res.Body
+ br.lastRetryOffset = br.offset
+ br.lastRetryTime = time.Now()
+ return n, nil
+
+ default:
+ logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err)
+ return n, err
+ }
+}
+
+// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
+// If tm is time.Time{}, it returns math.NaN()
+func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
+ if tm == (time.Time{}) {
+ return math.NaN()
+ }
+ return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
+}
+
+// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
+// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
+func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
+ currentTime := time.Now()
+ msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime)
+ msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime)
+ msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime)
+ logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms",
+ redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess)
+ progress := br.offset - br.lastRetryOffset
+ if progress >= bodyReaderMinimumProgress {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
+ return nil
+ }
+ if br.lastRetryTime == (time.Time{}) {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
+ return nil
+ }
+ if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
+ return nil
+ }
+ logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
+ return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w",
+ br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr)
+}
+
+// Close implements io.ReadCloser
+func (br *bodyReader) Close() error {
+ if br.body == nil {
+ return nil
+ }
+ err := br.body.Close()
+ br.body = nil
+ return err
+}
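
On the resume logic in the new bodyReader: after a recoverable read error it re-requests the blob with a Range: header for the current offset and only accepts a 206 whose Content-Range restarts exactly there. The grammar parseContentRange walks byte-by-byte is RFC 7233's bytes first-last/complete (or "*"); a hedged, simplified restatement with stdlib helpers (CutPrefix needs Go 1.20+):

```go
package main

import (
	"fmt"
	"strings"
)

// splitContentRange restates the shape parsed above, without the numeric
// conversion: "bytes <first>-<last>/<complete|*>".
func splitContentRange(hdr string) (first, last, complete string, ok bool) {
	rest, hasPrefix := strings.CutPrefix(hdr, "bytes ")
	if !hasPrefix {
		return "", "", "", false
	}
	span, complete, hasSlash := strings.Cut(rest, "/")
	if !hasSlash {
		return "", "", "", false
	}
	first, last, hasDash := strings.Cut(span, "-")
	if !hasDash {
		return "", "", "", false
	}
	return first, last, complete, true
}

func main() {
	fmt.Println(splitContentRange("bytes 100-199/200")) // 100 199 200 true
	fmt.Println(splitContentRange("bytes 100-199/*"))   // 100 199 * true
}
```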
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
index 323a02fc0..64ccf6ae5 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/client.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -9,11 +9,6 @@ import (
"github.com/docker/go-connections/tlsconfig"
)
-const (
- // The default API version to be used in case none is explicitly specified
- defaultAPIVersion = "1.22"
-)
-
// NewDockerClient initializes a new API client based on the passed SystemContext.
func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
host := dockerclient.DefaultDockerHost
@@ -21,33 +16,49 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
host = sys.DockerDaemonHost
}
- // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
- // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
- // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
- // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
+ opts := []dockerclient.Opt{
+ dockerclient.WithHost(host),
+ dockerclient.WithAPIVersionNegotiation(),
+ }
+
+ // We conditionally build a TLS configuration only for TLS sockets:
+ //
+ // The dockerclient.Client implementation differentiates between
+ // - Client.proto, which is ~how the connection is established (IP / AF_UNIX/Windows)
+ // - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
+ //
+ // Only Client.proto is set from the URL in dockerclient.WithHost(),
+ // Client.scheme is detected based on a http.Client.TLSClientConfig presence;
+ // dockerclient.WithHTTPClient with a client that has TLSClientConfig set
+ // will, by default, trigger an attempt to use TLS.
+ //
+ // So, don’t use WithHTTPClient for unix:// sockets at all.
//
- // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+ // Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
+ // we also should not set TLSClientConfig. We continue to use WithHTTPClient
+ // with our slightly non-default settings to avoid a behavior change on updates of c/image.
//
- // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
- // TLSClientConfig to nil. This can be achieved by using the form `http://`
- url, err := dockerclient.ParseHostURL(host)
+ // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic
+ // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ;
+ // so that would not be any simpler.
+ serverURL, err := dockerclient.ParseHostURL(host)
if err != nil {
return nil, err
}
- var httpClient *http.Client
- if url.Scheme != "unix" {
- if url.Scheme == "http" {
- httpClient = httpConfig()
- } else {
- hc, err := tlsConfig(sys)
- if err != nil {
- return nil, err
- }
- httpClient = hc
+ switch serverURL.Scheme {
+ case "unix": // Nothing
+ case "http":
+ hc := httpConfig()
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
+ default:
+ hc, err := tlsConfig(sys)
+ if err != nil {
+ return nil, err
}
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
}
- return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
+ return dockerclient.NewClientWithOpts(opts...)
}
func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
@@ -69,6 +80,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
return &http.Client{
Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
TLSClientConfig: tlsc,
},
CheckRedirect: dockerclient.CheckRedirect,
@@ -78,6 +90,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
func httpConfig() *http.Client {
return &http.Client{
Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
TLSClientConfig: nil,
},
CheckRedirect: dockerclient.CheckRedirect,
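
The daemon client construction above moves from the deprecated positional dockerclient.NewClient (pinned to API version "1.22") to functional options with version negotiation, and attaches a custom *http.Client only for TCP schemes so the library's own proto/scheme detection keeps working for unix:// sockets. A hedged sketch of the shape:

```go
package daemondemo

import (
	"net/http"

	dockerclient "github.com/docker/docker/client"
)

// newClient assembles options in the style of the hunk above; httpClient
// is nil for unix:// hosts so no TLSClientConfig can leak in.
func newClient(host string, httpClient *http.Client) (*dockerclient.Client, error) {
	opts := []dockerclient.Opt{
		dockerclient.WithHost(host),
		dockerclient.WithAPIVersionNegotiation(), // replaces the pinned "1.22"
	}
	if httpClient != nil {
		opts = append(opts, dockerclient.WithHTTPClient(httpClient))
	}
	return dockerclient.NewClientWithOpts(opts...)
}
```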
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index 88609b3dc..4a59a6a61 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -2,13 +2,16 @@ package daemon
import (
"context"
+ "encoding/json"
+ "errors"
+ "fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,13 +29,13 @@ type daemonImageDestination struct {
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) {
if ref.ref == nil {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
var mustMatchRuntimeOS = true
@@ -42,7 +45,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "Error initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
reader, writer := io.Pipe()
@@ -53,20 +56,22 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
goroutineContext, goroutineCancel := context.WithCancel(ctx)
go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
- return &daemonImageDestination{
+ d := &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
- Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
committed: false,
- }, nil
+ }
+ d.Destination = tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef, d.CommitWithOptions)
+ return d, nil
}
// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
+ defer c.Close()
err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
defer func() {
logrus.Debugf("docker-daemon: sending done, status %v", err)
@@ -82,12 +87,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
}
}()
+ err = imageLoad(ctx, c, reader)
+}
+
+// imageLoad accepts tar stream on reader and sends it to c
+func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
- err = errors.Wrap(err, "Error saving image to docker engine")
- return
+ return fmt.Errorf("starting a load operation in docker engine: %w", err)
}
defer resp.Body.Close()
+
+ // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
+ // copied here to minimize dependencies.
+ type jsonError struct {
+ Message string `json:"message,omitempty"`
+ }
+ type jsonMessage struct {
+ Error *jsonError `json:"errorDetail,omitempty"`
+ }
+
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var msg jsonMessage
+ if err := dec.Decode(&msg); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return fmt.Errorf("parsing docker load progress: %w", err)
+ }
+ if msg.Error != nil {
+ return fmt.Errorf("docker engine reported: %q", msg.Error.Message)
+ }
+ }
+ return nil // No error reported = success
}
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
@@ -114,7 +147,7 @@ func (d *daemonImageDestination) Close() error {
// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
// Whether that works or not, closing the PipeWriter seems desirable in any case.
- if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+ if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .CommitWithOptions()")); err != nil {
return err
}
}
@@ -127,11 +160,11 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
return d.ref
}
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.archive.Close(); err != nil {
return err
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index 74a678817..10923c278 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -2,10 +2,11 @@ package daemon
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type daemonImageSource struct {
@@ -22,16 +23,18 @@ type daemonImageSource struct {
// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "Error initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
+ defer c.Close()
+
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
if err != nil {
- return nil, errors.Wrap(err, "Error loading image from docker engine")
+ return nil, fmt.Errorf("loading image from docker engine: %w", err)
}
defer inputStream.Close()
@@ -39,7 +42,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
if err != nil {
return nil, err
}
- src := tarfile.NewSource(archive, true, nil, -1)
+ src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
index 4e4ed6881..9eedff745 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -2,15 +2,15 @@ package daemon
import (
"context"
+ "errors"
"fmt"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t daemonTransport) ParseReference(reference string) (types.ImageReference,
func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
if _, err := digest.Parse(scope); err == nil {
- return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+ return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
}
// FIXME? We could be verifying the various character set and length restrictions
@@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
// For daemonImageDestination, it must be a ref, which is NamedTagged.
// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
-// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
type daemonReference struct {
id digest.Digest
ref reference.Named // !reference.IsNameOnly
@@ -70,7 +70,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
// Other digest references are ambiguous, so refuse them.
if dgst.Algorithm() != digest.Canonical {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
}
return NewReference(dgst, nil)
}
@@ -80,7 +80,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, err
}
if reference.FamiliarName(ref) == digest.Canonical.String() {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
}
return NewReference("", ref)
}
@@ -92,7 +92,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
}
if ref != nil {
if reference.IsNameOnly(ref) {
- return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
@@ -102,7 +102,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
}
}
return daemonReference{
@@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref daemonReference) StringWithinTransport() string {
@@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -219,5 +215,5 @@ func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemCon
// Should this just untag the image? Should this stop running containers?
// The semantics is not quite as clear as for remote repositories.
// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
- return errors.Errorf("Deleting images not implemented for docker-daemon: images")
+ return errors.New("Deleting images not implemented for docker-daemon: images")
}
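
On the tag-plus-digest rejection kept above: reference values are classified purely via interface assertions, and a name carrying both a tag and a digest satisfies both interfaces at once, which is why NewReference must test the combination explicitly. A hedged illustration (the image name is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed(
		"busybox:1.36@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	fmt.Println(isTagged, isDigested) // true true: exactly the case rejected above
}
```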
diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go
new file mode 100644
index 000000000..0a0064576
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go
@@ -0,0 +1,148 @@
+// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
+// Copyright 2022 github.com/distribution/distribution authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "slices"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// errNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type unexpectedHTTPStatusError struct {
+ Status string
+}
+
+func (e *unexpectedHTTPStatusError) Error() string {
+ return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// unexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type unexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *unexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &unexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &unexpectedHTTPResponseError{
+ ParseErr: errNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
+}
+
+// handleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// unexpectedHTTPStatusError is returned for response codes outside of that
+// range.
+func handleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ // Check for OAuth errors within the `WWW-Authenticate` header first
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ for _, c := range dockerChallenge.ResponseChallenges(resp) {
+ if c.Scheme == "bearer" {
+ var err errcode.Error
+ // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+ switch c.Parameters["error"] {
+ case "invalid_token":
+ err.Code = errcode.ErrorCodeUnauthorized
+ case "insufficient_scope":
+ err.Code = errcode.ErrorCodeDenied
+ default:
+ continue
+ }
+ if description := c.Parameters["error_description"]; description != "" {
+ err.Message = description
+ } else {
+ err.Message = err.Code.Message()
+ }
+
+ return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+ }
+ }
+ err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+ if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+ }
+ return err
+ }
+ return &unexpectedHTTPStatusError{Status: resp.Status}
+}
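
Callers of the new file above get structured errors: errcode.Errors slices of errcode.Error values from docker/distribution, so registry failure codes can be matched without string comparison. A hedged usage sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Simulate what parseHTTPErrorResponse might produce for a 401 body.
	var err error = errcode.Errors{
		errcode.ErrorCodeUnauthorized.WithMessage("authentication required"),
	}

	var errs errcode.Errors
	if errors.As(err, &errs) {
		for _, e := range errs {
			var ec errcode.Error
			if errors.As(e, &ec) && ec.Code == errcode.ErrorCodeUnauthorized {
				fmt.Println("unauthorized:", ec.Message)
			}
		}
	}
}
```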
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index a9533ea39..bd024422b 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -1,13 +1,12 @@
package docker
import (
- "bytes"
"context"
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"os"
@@ -19,16 +18,21 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/multierr"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/useragent"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
- "github.com/containers/image/v5/version"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
- clientLib "github.com/docker/distribution/registry/client"
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -38,7 +42,6 @@ const (
dockerRegistry = "registry-1.docker.io"
resolvedPingV2URL = "%s://%s/v2/"
- resolvedPingV1URL = "%s://%s/v1/_ping"
tagsPath = "/v2/%s/tags/list"
manifestPath = "/v2/%s/manifests/%s"
blobsPath = "/v2/%s/blobs/%s"
@@ -63,11 +66,9 @@ type certPath struct {
var (
homeCertDir = filepath.FromSlash(".config/containers/certs.d")
perHostCertDirs = []certPath{
- {path: "/etc/containers/certs.d", absolute: true},
- {path: "/etc/docker/certs.d", absolute: true},
+ {path: etcDir + "/containers/certs.d", absolute: true},
+ {path: etcDir + "/docker/certs.d", absolute: true},
}
-
- defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
)
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -84,15 +85,13 @@ type extensionSignatureList struct {
Signatures []extensionSignature `json:"signatures"`
}
+// bearerToken records a cached token we can use to authenticate.
type bearerToken struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
+ token string
expirationTime time.Time
}
-// dockerClient is configuration for dealing with a single Docker registry.
+// dockerClient is configuration for dealing with a single container registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
sys *types.SystemContext
@@ -103,10 +102,11 @@ type dockerClient struct {
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
- auth types.DockerAuthConfig
- registryToken string
- signatureBase signatureStorageBase
- scope authScope
+ auth types.DockerAuthConfig
+ registryToken string
+ signatureBase lookasideStorageBase
+ useSigstoreAttachments bool
+ scope authScope
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
@@ -120,11 +120,15 @@ type dockerClient struct {
// Private state for detectProperties:
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
detectPropertiesError error // detectPropertiesError caches the initial error.
+ // Private state for logResponseWarnings
+ reportedWarningsLock sync.Mutex
+ reportedWarnings *set.Set[string]
}
type authScope struct {
- remoteName string
- actions string
+ resourceType string
+ remoteName string
+ actions string
}
// sendAuth determines whether we need authentication for v2 or v1 endpoint.
@@ -140,37 +144,6 @@ const (
noAuth
)
-func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
- token := new(bearerToken)
- if err := json.Unmarshal(blob, &token); err != nil {
- return nil, err
- }
- if token.Token == "" {
- token.Token = token.AccessToken
- }
- if token.ExpiresIn < minimumTokenLifetimeSeconds {
- token.ExpiresIn = minimumTokenLifetimeSeconds
- logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
- }
- if token.IssuedAt.IsZero() {
- token.IssuedAt = time.Now().UTC()
- }
- token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
- return token, nil
-}
-
-// this is cloned from docker/go-connections because upstream docker has changed
-// it and make deps here fails otherwise.
-// We'll drop this once we upgrade to docker 1.13.x deps.
-func serverDefault() *tls.Config {
- return &tls.Config{
- // Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
- PreferServerCipherSuites: true,
- CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
- }
-}
-
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
if sys != nil && sys.DockerCertPath != "" {
@@ -193,7 +166,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
}
fullCertDirPath = filepath.Join(hostCertDir, hostPort)
- _, err := os.Stat(fullCertDirPath)
+ err := fileutils.Exists(fullCertDirPath)
if err == nil {
break
}
@@ -212,18 +185,19 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
// signatureBase is always set in the return value
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
- registry := reference.Domain(ref.ref)
- auth, err := config.GetCredentials(sys, registry)
+// The caller must call .Close() on the returned client when done.
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
+ auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
- return nil, errors.Wrapf(err, "error getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
- sigBase, err := SignatureStorageBaseURL(sys, ref, write)
+ sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
if err != nil {
return nil, err
}
+ registry := reference.Domain(ref.ref)
client, err := newDockerClient(sys, registry, ref.ref.Name())
if err != nil {
return nil, err
@@ -233,6 +207,8 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
client.registryToken = sys.DockerBearerRegistryToken
}
client.signatureBase = sigBase
+ client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
+ client.scope.resourceType = "repository"
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
@@ -244,12 +220,15 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
// (e.g., "registry.com[:5000][/some/namespace]/repo").
// Please note that newDockerClient does not set all members of dockerClient
// (e.g., username and password); those must be set by callers if necessary.
+// The caller must call .Close() on the returned client when done.
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
}
- tlsClientConfig := serverDefault()
+ tlsClientConfig := &tls.Config{
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ }
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@@ -269,7 +248,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
- return nil, errors.Wrapf(err, "error loading registries")
+ return nil, fmt.Errorf("loading registries: %w", err)
}
if reg != nil {
if reg.Blocked {
@@ -279,16 +258,17 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
}
tlsClientConfig.InsecureSkipVerify = skipVerify
- userAgent := defaultUserAgent
+ userAgent := useragent.DefaultUserAgent
if sys != nil && sys.DockerRegistryUserAgent != "" {
userAgent = sys.DockerRegistryUserAgent
}
return &dockerClient{
- sys: sys,
- registry: registry,
- userAgent: userAgent,
- tlsClientConfig: tlsClientConfig,
+ sys: sys,
+ registry: registry,
+ userAgent: userAgent,
+ tlsClientConfig: tlsClientConfig,
+ reportedWarnings: set.New[string](),
}, nil
}
@@ -297,20 +277,27 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
- return errors.Wrapf(err, "error creating new docker client")
+ return fmt.Errorf("creating new docker client: %w", err)
}
+ defer client.Close()
client.auth = types.DockerAuthConfig{
Username: username,
Password: password,
}
- resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
if err != nil {
return err
}
defer resp.Body.Close()
-
- return httpResponseToError(resp, "")
+ if resp.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(resp)
+ if resp.StatusCode == http.StatusUnauthorized {
+ err = ErrUnauthorizedForCredentials{Err: err}
+ }
+ return err
+ }
+ return nil
}
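
CheckAuth is part of the package's public API; with the change above, a 401 now surfaces the parsed registry error wrapped in ErrUnauthorizedForCredentials. A usage sketch (registry host and credentials are placeholders, and the call performs a real network round trip):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	sys := &types.SystemContext{} // default configuration

	// Registry host and credentials below are placeholders.
	err := docker.CheckAuth(ctx, sys, "myuser", "mypassword", "registry.example.com")

	var unauthorized docker.ErrUnauthorizedForCredentials
	switch {
	case err == nil:
		fmt.Println("credentials accepted")
	case errors.As(err, &unauthorized):
		fmt.Println("registry rejected the credentials:", err)
	default:
		fmt.Println("could not verify credentials:", err)
	}
}
```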
// SearchResult holds the information of each matching image
@@ -343,9 +330,10 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
+ // We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry)
if err != nil {
- return nil, errors.Wrapf(err, "error getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore
@@ -355,12 +343,18 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
hostname := registry
if registry == dockerHostname {
hostname = dockerV1Hostname
+ // A search term of library/foo does not find the library/foo image on the docker.io servers,
+ // which is surprising - but the Docker CLI modifies the search term client-side in this same way,
+ // so it seems convenient to do the same thing.
+ // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
+ image = strings.TrimPrefix(image, "library/")
}
client, err := newDockerClient(sys, hostname, registry)
if err != nil {
- return nil, errors.Wrapf(err, "error creating new docker client")
+ return nil, fmt.Errorf("creating new docker client: %w", err)
}
+ defer client.Close()
client.auth = auth
if sys != nil {
client.registryToken = sys.DockerBearerRegistryToken
@@ -379,7 +373,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint")
- resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
@@ -399,15 +393,16 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
- resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ err := registryHTTPResponseToError(resp)
+ logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
@@ -422,7 +417,14 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
res := SearchResult{
Name: repo,
}
- searchRes = append(searchRes, res)
+ // bugzilla.redhat.com/show_bug.cgi?id=1976283
+ // If we have a full match, make sure it's listed as the first result.
+ // (Note there might be a full match we never see if we reach the result limit first.)
+ if repo == image {
+ searchRes = append([]SearchResult{res}, searchRes...)
+ } else {
+ searchRes = append(searchRes, res)
+ }
}
}
@@ -430,8 +432,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if link == "" {
break
}
- linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
- linkURL, err := url.Parse(linkURLStr)
+ linkURLPart, _, _ := strings.Cut(link, ";")
+ linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return searchRes, err
}
@@ -454,8 +456,49 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
return nil, err
}
- url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
- return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
+ requestURL, err := c.resolveRequestURL(path)
+ if err != nil {
+ return nil, err
+ }
+ return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
+}
+
+// resolveRequestURL turns a path for c.makeRequest into a full URL.
+// Most users should call makeRequest directly; this exists basically to make the URL available for debug logs.
+func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
+ urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ res, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// needsRetryWithUpdatedScope checks whether the auth headers in the response indicate
+// that authorization failed with an "insufficient_scope" error. If so, it returns
+// the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
+ if res.StatusCode == http.StatusUnauthorized {
+ challenges := parseAuthHeader(res.Header)
+ for _, challenge := range challenges {
+ if challenge.Scheme == "bearer" {
+ if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
+ if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
+ if newScope, err := parseAuthScope(scope); err == nil {
+ return true, newScope
+ } else {
+ logrus.WithFields(logrus.Fields{
+ "error": err,
+ "scope": scope,
+ "challenge": challenge,
+ }).Error("Failed to parse the authentication scope from the given challenge")
+ }
+ }
+ }
+ }
+ }
+ }
+ return false, nil
}
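
The scope string carried by such a challenge has the form resourceType:remoteName:actions. A sketch of how it decomposes (parseScope here is an illustrative stand-in for the internal parseAuthScope):

```go
package main

import (
	"fmt"
	"strings"
)

// authScope mirrors the three fields used by the client.
type authScope struct {
	resourceType string
	remoteName   string
	actions      string
}

// parseScope is an illustrative stand-in for the internal parseAuthScope:
// a scope is "resourceType:remoteName:actions", as sent back in a
// WWW-Authenticate challenge with error="insufficient_scope".
func parseScope(scope string) (*authScope, error) {
	parts := strings.Split(scope, ":")
	if len(parts) != 3 {
		return nil, fmt.Errorf("invalid scope %q: expected 3 colon-separated parts", scope)
	}
	return &authScope{resourceType: parts[0], remoteName: parts[1], actions: parts[2]}, nil
}

func main() {
	// The scope a registry might send when a pull-scoped token is used
	// for an operation that also needs push access.
	s, err := parseScope("repository:library/busybox:pull,push")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *s)
}
```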
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
@@ -471,9 +514,8 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
return time.Duration(num) * time.Second
}
// Second, check if we have an HTTP date.
- // If the delta between the date and now is positive, use it.
- // Otherwise, fall back to using the default exponential back off.
if t, err := http.ParseTime(after); err == nil {
+ // If the delta between the date and now is positive, use it.
delta := time.Until(t)
if delta > 0 {
return delta
@@ -481,7 +523,6 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
logrus.Debugf("Retry-After date in the past, ignoring it")
return fallbackDelay
}
- // If the header contents are bogus, fall back to using the default exponential back off.
logrus.Debugf("Invalid Retry-After format, ignoring it")
return fallbackDelay
}
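
parseRetryAfter accepts the two RFC 7231 forms of Retry-After, a number of seconds or an HTTP date, and falls back to the caller's delay otherwise. A condensed sketch of the same decision order:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter mirrors the logic above: Retry-After may be either a number
// of seconds or an HTTP date; anything else falls back to fallbackDelay.
func retryAfter(header string, fallbackDelay time.Duration) time.Duration {
	if seconds, err := strconv.ParseInt(header, 10, 64); err == nil {
		return time.Duration(seconds) * time.Second
	}
	if t, err := http.ParseTime(header); err == nil {
		if delta := time.Until(t); delta > 0 {
			return delta
		}
	}
	return fallbackDelay
}

func main() {
	fmt.Println(retryAfter("120", time.Second))                           // 2m0s
	fmt.Println(retryAfter("Wed, 21 Oct 2015 07:28:00 GMT", time.Second)) // date in the past: 1s
	fmt.Println(retryAfter("bogus", time.Second))                         // 1s
}
```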
@@ -491,32 +532,60 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
// makeRequest should generally be preferred.
// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
// TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
delay := backoffInitialDelay
attempts := 0
for {
- res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
+ res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
+ if err != nil {
+ return nil, err
+ }
attempts++
- if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
+
+ // By default we use pre-defined scopes per operation. In
+ // certain cases, this can fail when our authentication is
+ // insufficient; we may then get an error back with a
+ // WWW-Authenticate header indicating an insufficient scope.
+ //
+ // Check for that and update the client challenges to retry after
+ // requesting a new token.
+ //
+ // We only try this on the first attempt, to not overload an
+ // already struggling server.
+ // We also cannot retry with a body (stream != nil), as the stream
+ // was already read.
+ if attempts == 1 && stream == nil && auth != noAuth {
+ if retry, newScope := needsRetryWithUpdatedScope(res); retry {
+ logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
+ res.Body.Close()
+ // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
+ // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
+ // for more than one extra scope.
+ res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
+ if err != nil {
+ return nil, err
+ }
+ extraScope = newScope
+ }
+ }
+
+ if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
- return res, err
+ return res, nil
}
// close response body before retry or context done
res.Body.Close()
- delay = parseRetryAfter(res, delay)
- if delay > backoffMaxDelay {
- delay = backoffMaxDelay
- }
- logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds())
+ delay = min(parseRetryAfter(res, delay), backoffMaxDelay)
+ logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(delay):
// Nothing
}
- delay = delay * 2 // exponential back off
+ delay *= 2 // If the registry does not specify a delay, back off exponentially.
}
}
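
The retry loop sleeps for the server-suggested delay capped at backoffMaxDelay, and doubles the delay when the registry gives no usable Retry-After. A sketch of that schedule with illustrative constants (the real ones are unexported package constants):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values; the real constants live unexported in this package.
	const (
		backoffInitialDelay  = 2 * time.Second
		backoffMaxDelay      = 60 * time.Second
		backoffNumIterations = 7
	)

	delay := backoffInitialDelay
	for attempts := 1; attempts < backoffNumIterations; attempts++ {
		// Pretend every request came back 429 with no usable Retry-After:
		// the sleep doubles each round but is capped at backoffMaxDelay.
		sleep := min(delay, backoffMaxDelay)
		fmt.Printf("attempt %d: sleeping %s\n", attempts, sleep)
		delay = sleep * 2
	}
}
```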
@@ -524,13 +593,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an http 429 status code.
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
- req, err := http.NewRequest(method, url, stream)
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
- if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
@@ -545,14 +613,85 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
return nil, err
}
}
- logrus.Debugf("%s %s", method, url)
+ logrus.Debugf("%s %s", method, resolvedURL.Redacted())
res, err := c.client.Do(req)
if err != nil {
return nil, err
}
+ if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
+ c.logResponseWarnings(res, warnings)
+ }
return res, nil
}
+// logResponseWarnings logs warningHeaders from res, if any.
+func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
+ c.reportedWarningsLock.Lock()
+ defer c.reportedWarningsLock.Unlock()
+
+ for _, header := range warningHeaders {
+ warningString := parseRegistryWarningHeader(header)
+ if warningString == "" {
+ logrus.Debugf("Ignored Warning: header from registry: %q", header)
+ } else {
+ if !c.reportedWarnings.Contains(warningString) {
+ c.reportedWarnings.Add(warningString)
+ // Note that reportedWarnings is based only on warningString, so that we don’t
+ // repeat the same warning for every request - but the logged warning includes the
+ // URL of the request where it was first encountered, so it may not be specific to that URL.
+ logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
+ } else {
+ logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
+ }
+ }
+ }
+}
+
+// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
+// values allowed by opencontainers/distribution-spec.
+// It returns the warning string if the header has the expected format, or "" otherwise.
+func parseRegistryWarningHeader(header string) string {
+ const expectedPrefix = `299 - "`
+ const expectedSuffix = `"`
+
+ // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
+ // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
+ header, ok := strings.CutPrefix(header, expectedPrefix)
+ if !ok {
+ return ""
+ }
+ header, ok = strings.CutSuffix(header, expectedSuffix)
+ if !ok {
+ return ""
+ }
+
+ // “Recipients that process the value of a quoted-string MUST handle a quoted-pair
+ // as if it were replaced by the octet following the backslash.”, so let’s do that…
+ res := strings.Builder{}
+ afterBackslash := false
+ for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
+ switch {
+ case c == 0x7F || (c < ' ' && c != '\t'):
+ return "" // Control characters are forbidden
+ case afterBackslash:
+ res.WriteByte(c)
+ afterBackslash = false
+ case c == '"':
+ // An unescaped quote terminates the warn-text; either a warn-date (forbidden by
+ // distribution-spec) follows, or the input is completely invalid.
+ return ""
+ case c == '\\':
+ afterBackslash = true
+ default:
+ res.WriteByte(c)
+ }
+ }
+ if afterBackslash {
+ return ""
+ }
+ return res.String()
+}
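
The quoted-pair handling is the subtle part of parseRegistryWarningHeader: a backslash escapes the following octet. A simplified sketch of just that rule (unlike the real function, it does not reject control characters or unescaped quotes):

```go
package main

import (
	"fmt"
	"strings"
)

// unquotePair applies the RFC 7234 quoted-pair rule used above: a backslash
// escapes the following octet, everything else passes through unchanged.
func unquotePair(s string) string {
	var b strings.Builder
	escaped := false
	for _, c := range []byte(s) {
		if escaped {
			b.WriteByte(c)
			escaped = false
			continue
		}
		if c == '\\' {
			escaped = true
			continue
		}
		b.WriteByte(c)
	}
	return b.String()
}

func main() {
	// A spec-compliant header is `299 - "<warn-text>"`; inside the quotes,
	// `\"` stands for a literal quote.
	header := `299 - "pulls are \"rate limited\", slow down"`
	body := strings.TrimSuffix(strings.TrimPrefix(header, `299 - "`), `"`)
	fmt.Println(unquotePair(body)) // pulls are "rate limited", slow down
}
```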
+
// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
@@ -577,8 +716,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
- // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
- cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
+ // Using ':' as a separator here is unambiguous because getBearerToken below
+ // uses the same separator when formatting a remote request (and because
+ // repository names that we create can't contain colons, and extraScope values
+ // coming from a server come from `parseAuthScope`, which also splits on colons).
+ cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
+ if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
+ return fmt.Errorf(
+ "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
+ cacheKey,
+ colonCount,
+ )
+ }
scopes = append(scopes, *extraScope)
}
var token bearerToken
@@ -603,7 +752,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
token = *t
c.tokenCache.Store(cacheKey, token)
}
- registryToken = token.Token
+ registryToken = token.token
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
return nil
@@ -619,35 +768,34 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodPost, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
-
// Make the form data required against the oauth2 authentication
// More details here: https://docs.docker.com/registry/spec/auth/oauth/
params := authReq.URL.Query()
if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}
+
for _, scope := range scopes {
- if scope.remoteName != "" && scope.actions != "" {
- params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}
params.Add("grant_type", "refresh_token")
params.Add("refresh_token", c.auth.IdentityToken)
params.Add("client_id", "containers/image")
- authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+ authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
authReq.Header.Add("User-Agent", c.userAgent)
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
- logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
@@ -657,27 +805,21 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
return nil, err
}
- tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
- if err != nil {
- return nil, err
- }
-
- return newBearerTokenFromJSONBlob(tokenBlob)
+ return newBearerTokenFromHTTPResponseBody(res)
}
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodGet, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
params := authReq.URL.Query()
if c.auth.Username != "" {
params.Add("account", c.auth.Username)
@@ -688,8 +830,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
for _, scope := range scopes {
- if scope.remoteName != "" && scope.actions != "" {
- params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}
@@ -700,21 +842,59 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
authReq.Header.Add("User-Agent", c.userAgent)
- logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
}
defer res.Body.Close()
- if err := httpResponseToError(res, "Requesting bear token"); err != nil {
+ if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
return nil, err
}
- tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+
+ return newBearerTokenFromHTTPResponseBody(res)
+}
+
+// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken.
+// The caller is still responsible for ensuring res.Body is closed.
+func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) {
+ blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
if err != nil {
return nil, err
}
- return newBearerTokenFromJSONBlob(tokenBlob)
+ var token struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
+ }
+ if err := json.Unmarshal(blob, &token); err != nil {
+ const bodySampleLength = 50
+ bodySample := blob
+ if len(bodySample) > bodySampleLength {
+ bodySample = bodySample[:bodySampleLength]
+ }
+ return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err)
+ }
+
+ bt := &bearerToken{
+ token: token.Token,
+ }
+ if bt.token == "" {
+ bt.token = token.AccessToken
+ }
+
+ if token.ExpiresIn < minimumTokenLifetimeSeconds {
+ token.ExpiresIn = minimumTokenLifetimeSeconds
+ logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+ }
+ if token.IssuedAt.IsZero() {
+ token.IssuedAt = time.Now().UTC()
+ }
+ bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ return bt, nil
}
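
newBearerTokenFromHTTPResponseBody now keeps the wire format private and normalizes it in one place: "token" vs. "access_token", a minimum lifetime, and a default issue time. A condensed sketch of the same normalization rules, assuming an illustrative 60-second minimum (the real constant is unexported):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// The two common token endpoint responses: some servers use "token",
	// others "access_token"; issued_at and expires_in may be missing.
	blob := []byte(`{"access_token":"abc","expires_in":30}`)

	var token struct {
		Token       string    `json:"token"`
		AccessToken string    `json:"access_token"`
		ExpiresIn   int       `json:"expires_in"`
		IssuedAt    time.Time `json:"issued_at"`
	}
	if err := json.Unmarshal(blob, &token); err != nil {
		panic(err)
	}
	if token.Token == "" {
		token.Token = token.AccessToken
	}
	const minimumTokenLifetimeSeconds = 60 // illustrative floor, as in the code above
	if token.ExpiresIn < minimumTokenLifetimeSeconds {
		token.ExpiresIn = minimumTokenLifetimeSeconds
	}
	if token.IssuedAt.IsZero() {
		token.IssuedAt = time.Now().UTC()
	}
	expiration := token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
	fmt.Println(token.Token, "expires", expiration.Format(time.RFC3339))
}
```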
// detectPropertiesHelper performs the work of detectProperties which executes
@@ -730,16 +910,19 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
c.client = &http.Client{Transport: tr}
ping := func(scheme string) error {
- url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
+ if err != nil {
+ return err
+ }
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
if err != nil {
- logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+ logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
return err
}
defer resp.Body.Close()
- logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
- return httpResponseToError(resp, "")
+ return registryHTTPResponseToError(resp)
}
c.challenges = parseAuthHeader(resp.Header)
c.scheme = scheme
@@ -751,32 +934,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
err = ping("http")
}
if err != nil {
- err = errors.Wrapf(err, "error pinging docker registry %s", c.registry)
- if c.sys != nil && c.sys.DockerDisableV1Ping {
- return err
- }
- // best effort to understand if we're talking to a V1 registry
- pingV1 := func(scheme string) bool {
- url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
- if err != nil {
- logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
- return false
- }
- defer resp.Body.Close()
- logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
- if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
- return false
- }
- return true
- }
- isV1 := pingV1("https")
- if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
- isV1 = pingV1("http")
- }
- if isV1 {
- err = ErrV1NotSupported
- }
+ err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
}
return err
}
@@ -788,18 +946,205 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return c.detectPropertiesError
}
+// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
+// The caller is responsible for ensuring tagOrDigest uses the expected format.
+func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+ res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
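
fetchManifest advertises every manifest flavor the library can parse via Accept headers and trusts the registry's Content-Type on the way back. A small sketch of the request side using the exported manifest.DefaultRequestedManifestMIMETypes (the registry URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// The manifest endpoint URL here is a placeholder.
	req, err := http.NewRequest(http.MethodGet,
		"https://registry.example.com/v2/library/busybox/manifests/latest", nil)
	if err != nil {
		panic(err)
	}
	// Advertise every manifest flavor the library can parse; the registry
	// replies with one of them and names it in Content-Type.
	for _, mt := range manifest.DefaultRequestedManifestMIMETypes {
		req.Header.Add("Accept", mt)
	}
	fmt.Println(req.Header.Values("Accept"))
}
```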
+
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// This function can return a nil reader when none of the URLs is supported by this function. In that case,
+// the caller should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+ var remoteErrors []error
+ for _, u := range urls {
+ blobURL, err := url.Parse(u)
+ if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
+ continue // unsupported url. skip this url.
+ }
+ // NOTE: we must not authenticate on additional URLs as those
+ // can be abused to leak credentials or tokens. Please
+ // refer to CVE-2020-15157 for more information.
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
+ if err != nil {
+ remoteErrors = append(remoteErrors, err)
+ continue
+ }
+ if resp.StatusCode != http.StatusOK {
+ err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+ remoteErrors = append(remoteErrors, err)
+ logrus.Debug(err)
+ resp.Body.Close()
+ continue
+ }
+ return resp.Body, getBlobSize(resp), nil
+ }
+ if remoteErrors == nil {
+ return nil, 0, nil // fallback to non-external blob
+ }
+ return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
+}
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, s, err := c.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
+ }
+
+ if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
+ return nil, 0, err
+ }
+ path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(res)
+ res.Body.Close()
+ return nil, 0, fmt.Errorf("fetching blob: %w", err)
+ }
+ cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
+ blobSize := getBlobSize(res)
+
+ reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
+ if err != nil {
+ res.Body.Close()
+ return nil, 0, err
+ }
+ return reconnectingReader, blobSize, nil
+}
+
+// getOCIDescriptorContents returns the contents of the blob specified by descriptor in ref, which must fit within maxSize.
+func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+ payload, err := iolimits.ReadAtMost(reader, maxSize)
+ if err != nil {
+ return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
+ }
+ return payload, nil
+}
+
+// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
+func isManifestUnknownError(err error) bool {
+ // docker/distribution, and as defined in the spec
+ var ec errcode.ErrorCoder
+ if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
+ return true
+ }
+ // registry.redhat.io as of October 2022
+ var e errcode.Error
+ if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
+ return true
+ }
+ // Harbor v2.10.2
+ if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") {
+ return true
+ }
+
+ // opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
+ // but specifies that the HTTP status must be 404.
+ var unexpected *unexpectedHTTPResponseError
+ if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
+ return true
+ }
+ return false
+}
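
The classification above leans on errors.As, which unwraps %w chains, so the heuristics still apply after fetchManifest wraps the error with context. A self-contained sketch of the pattern (the error type here is illustrative, not the errcode API):

```go
package main

import (
	"errors"
	"fmt"
)

// notFoundError is an illustrative error type; the real code matches
// errcode.Error / errcode.ErrorCoder from docker/distribution.
type notFoundError struct{ msg string }

func (e notFoundError) Error() string { return e.msg }

func isNotFound(err error) bool {
	var nf notFoundError
	// errors.As walks the wrapping chain built with %w, so the check
	// still fires after fetchManifest wraps the error with context.
	return errors.As(err, &nf)
}

func main() {
	base := notFoundError{msg: "manifest unknown"}
	wrapped := fmt.Errorf("reading manifest latest in example.com/repo: %w", base)
	fmt.Println(isNotFound(wrapped)) // true
}
```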
+
+// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
+// digest in ref.
+// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
+ tag, err := sigstoreAttachmentTag(digest)
+ if err != nil {
+ return nil, err
+ }
+ sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String())
+ manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag)
+ if err != nil {
+ // FIXME: Are we going to need better heuristics??
+ // This alone is probably a good enough reason for sigstore to be opt-in only,
+ // otherwise we would just break ordinary copies.
+ if isManifestUnknownError(err) {
+ logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err)
+ return nil, nil
+ }
+ logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err)
+ return nil, err
+ }
+ if mimeType != imgspecv1.MediaTypeImageManifest {
+ // FIXME: Try anyway??
+ return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q",
+ sigstoreRef.String(), mimeType)
+ }
+ res, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err)
+ }
+ return res, nil
+}
+
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+ if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters
+ return nil, err
+ }
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
- res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
defer res.Body.Close()
-
if res.StatusCode != http.StatusOK {
- return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+ return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res))
}
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
@@ -809,7 +1154,23 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
var parsedBody extensionSignatureList
if err := json.Unmarshal(body, &parsedBody); err != nil {
- return nil, errors.Wrapf(err, "Error decoding signature list")
+ return nil, fmt.Errorf("decoding signature list: %w", err)
}
return &parsedBody, nil
}
+
+// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
+func sigstoreAttachmentTag(d digest.Digest) (string, error) {
+ if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters
+ return "", err
+ }
+ return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
+}
+
+// Close removes resources associated with an initialized dockerClient, if any.
+func (c *dockerClient) Close() error {
+ if c.client != nil {
+ c.client.CloseIdleConnections()
+ }
+ return nil
+}
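
The tag convention used by sigstoreAttachmentTag is simple enough to show end to end; this sketch derives the attachment tag for an arbitrary digest (the digest value is computed from a throwaway string, just to have valid input):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Digest computed from a throwaway string, just to have a valid value.
	d := digest.FromString("example manifest")
	// Replace the first ':' with '-' and append ".sig", as sigstoreAttachmentTag does.
	tag := strings.Replace(d.String(), ":", "-", 1) + ".sig"
	fmt.Println(tag) // sha256-<64 hex digits>.sig
}
```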
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index f9fe4e8a3..74f559dce 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -3,17 +3,18 @@ package docker
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
@@ -56,25 +57,30 @@ func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
+ return nil, errors.New("ref must be a dockerReference")
}
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return nil, errors.Wrap(err, "failed to create client")
+ return nil, fmt.Errorf("failed to create client: %w", err)
}
+ defer client.Close()
tags := make([]string, 0)
for {
- res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
defer res.Body.Close()
- if err := httpResponseToError(res, "Error fetching tags list"); err != nil {
- return nil, err
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res))
}
var tagsHolder struct {
@@ -83,15 +89,34 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
return nil, err
}
- tags = append(tags, tagsHolder.Tags...)
+ for _, tag := range tagsHolder.Tags {
+ if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
+ // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary
+ // to the spec, may include JSON null values in the list; and Go silently parses them as "".
+ if tag == "" {
+ logrus.Debugf("Ignoring invalid empty tag")
+ continue
+ }
+ // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
+ // contrary to the tag format specified in
+ // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
+ // include digests in the list.
+ if _, err := digest.Parse(tag); err == nil {
+ logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag)
+ continue
+ }
+ return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
+ }
+ tags = append(tags, tag)
+ }
link := res.Header.Get("Link")
if link == "" {
break
}
- linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
- linkURL, err := url.Parse(linkURLStr)
+ linkURLPart, _, _ := strings.Cut(link, ";")
+ linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return tags, err
}
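
Both the tag-list and catalog loops page through results using the RFC 5988 Link header, now split with strings.Cut instead of strings.Split. A minimal sketch of the extraction (the header value is a made-up example):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// nextPage extracts the URL from an RFC 5988 Link header the way the loop above
// does: take everything before the first ';' and strip the angle brackets.
func nextPage(link string) (*url.URL, error) {
	linkURLPart, _, _ := strings.Cut(link, ";")
	return url.Parse(strings.Trim(linkURLPart, "<>"))
}

func main() {
	link := `</v2/library/busybox/tags/list?last=1.35&n=50>; rel="next"`
	u, err := nextPage(link)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // /v2/library/busybox/tags/list?last=1.35&n=50
}
```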
@@ -116,7 +141,10 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return "", errors.Errorf("ref must be a dockerReference")
+ return "", errors.New("ref must be a dockerReference")
+ }
+ if dr.isUnknownDigest {
+ return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport())
}
tagOrDigest, err := dr.tagOrDigest()
@@ -124,24 +152,29 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
return "", err
}
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return "", err
+ }
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return "", errors.Wrap(err, "failed to create client")
+ return "", fmt.Errorf("failed to create client: %w", err)
}
+ defer client.Close()
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
- res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name())
+ return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res))
}
dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index e11084dc8..f5e2bb61e 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -5,31 +5,44 @@ import (
"context"
"crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "maps"
"net/http"
"net/url"
"os"
"path/filepath"
+ "slices"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+
ref dockerReference
c *dockerClient
// State
@@ -37,15 +50,40 @@ type dockerImageDestination struct {
}
// newImageDestination creates a new ImageDestination for the specified image reference.
-func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
- c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
+func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push")
if err != nil {
return nil, err
}
- return &dockerImageDestination{
+ mimeTypes := []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageIndex,
+ manifest.DockerV2ListMediaType,
+ }
+ if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes {
+ mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
+ }
+
+ dest := &dockerImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: mimeTypes,
+ DesiredLayerCompression: types.Compress,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
ref: ref,
c: c,
- }, nil
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -56,20 +94,7 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
- return nil
-}
-
-func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
- mimeTypes := []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- imgspecv1.MediaTypeImageIndex,
- manifest.DockerV2ListMediaType,
- }
- if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes {
- mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
- }
- return mimeTypes
+ return d.c.Close()
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
@@ -84,32 +109,16 @@ func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
case d.c.signatureBase != nil:
return nil
default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
}
}
-func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
-}
-
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
@@ -118,104 +127,115 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil
}
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- if inputInfo.Digest.String() != "" {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
+ // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
+ // the source blob is uncompressed, and the destination blob is being compressed "on the fly".
+ if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
+ logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer cleanup()
+ stream = streamCopy
+ }
+
+ if inputInfo.Digest != "" {
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
- // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
- haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
if haveBlob {
- return reusedInfo, nil
+ return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
}
}
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
- res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
+ return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+ return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
}
- digester := digest.Canonical.Digester()
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
sizeCounter := &sizeCounter{}
+ stream = io.TeeReader(stream, sizeCounter)
+
uploadLocation, err = func() (*url.URL, error) { // A scope for defer
- uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)))
+ uploadReader := uploadreader.NewUploadReader(stream)
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
+ return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return nil, errors.Wrap(err, "Error determining upload URL")
+ return nil, fmt.Errorf("determining upload URL: %w", err)
}
return uploadLocation, nil
}()
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
- // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
- locationQuery.Set("digest", computedDigest.String())
+ locationQuery.Set("digest", blobDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
- res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
+ return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
}
- logrus.Debugf("Upload of layer %s complete", computedDigest)
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
- return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
+ logrus.Debugf("Upload of layer %s complete", blobDigest)
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
+ return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
}
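
For orientation, PutBlobWithOptions above implements the OCI distribution spec push sequence: POST /v2/<name>/blobs/uploads/ opens a session (202 Accepted plus a Location header), PATCH streams the data, and a final PUT with ?digest=<digest> commits it. The following is a hedged sketch of the same handshake with plain net/http (the monolithic variant, a single PUT after the POST; error handling trimmed, registry/repo values illustrative):

package registrysketch

import (
	"bytes"
	"context"
	"fmt"
	"net/http"

	"github.com/opencontainers/go-digest"
)

// pushBlob sketches the spec-defined upload handshake used above.
func pushBlob(ctx context.Context, client *http.Client, registry, repo string, blob []byte) error {
	// 1. Initiate an upload session; the registry answers 202 Accepted with a Location header.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, repo), nil)
	if err != nil {
		return err
	}
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusAccepted {
		return fmt.Errorf("initiating upload: %s", res.Status)
	}
	loc, err := res.Location()
	if err != nil {
		return err
	}
	// 2. Commit the data, identifying it by digest in the query string (201 Created on success).
	q := loc.Query()
	q.Set("digest", digest.FromBytes(blob).String())
	loc.RawQuery = q.Encode()
	req, err = http.NewRequestWithContext(ctx, http.MethodPut, loc.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	res, err = client.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return fmt.Errorf("committing upload: %s", res.Status)
	}
	return nil
}
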
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+ if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
+ return false, -1, err
+ }
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
- res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}
@@ -226,12 +246,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
- return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
}
}
@@ -244,9 +264,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
"from": {reference.Path(srcRepo)},
}.Encode(),
}
- mountPath := u.String()
- logrus.Debugf("Trying to mount %s", mountPath)
- res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+ logrus.Debugf("Trying to mount %s", u.Redacted())
+ res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
@@ -261,10 +280,10 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
- return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
}
- logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
- res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
@@ -277,55 +296,108 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
- return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
}
}
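
The request built in mountBlob above is the spec's cross-repository mount endpoint; a small sketch of just the URL shape (names illustrative), where 201 Created means the blob was mounted and 202 Accepted means the registry fell back to starting an upload session that then has to be cancelled:

package registrysketch

import (
	"fmt"
	"net/url"

	"github.com/opencontainers/go-digest"
)

// mountURL builds POST /v2/<dest>/blobs/uploads/?mount=<digest>&from=<src>,
// the cross-repository blob mount request from the OCI distribution spec.
func mountURL(destRepo, srcRepo string, d digest.Digest) url.URL {
	return url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", destRepo),
		RawQuery: url.Values{
			"mount": {d.String()},
			"from":  {srcRepo},
		}.Encode(),
	}
}
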
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
- }
-
- // First, check whether the blob happens to already exist at the destination.
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
- return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+ return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
+ return false, private.ReusedBlob{}, nil
+}
- // Then try reusing blobs from other locations.
- bic := blobinfocache.FromBlobInfoCache(cache)
- candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
- for _, candidate := range candidates {
- candidateRepo, err := parseBICLocationReference(candidate.Location)
+func optionalCompressionName(algo *compressiontypes.Algorithm) string {
+ if algo != nil {
+ return algo.Name()
+ }
+ return "nil"
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+ }
+
+ originalCandidateKnownToBeMissing := false
+ if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ // First, check whether the blob happens to already exist at the destination.
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
- logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
- continue
+ return false, private.ReusedBlob{}, err
}
- if candidate.CompressorName != blobinfocache.Uncompressed {
- logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
- } else {
- logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
+ if haveBlob {
+ return true, reusedInfo, nil
}
+ originalCandidateKnownToBeMissing = true
+ } else {
+ logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
+ optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+ // We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+ // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+ // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+ // with the BIC’s annotations.
+ // This is not quite correct, it only works if the BIC also contains an acceptable _location_.
+ // Ideally, we could look up just the compression algorithm/annotations for info.Digest,
+ // and use it even if no location candidate exists and the original candidate is present.
+ }
- // Sanity checks:
- if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
- logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
- continue
+ // Then try reusing blobs from other locations.
+ candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
+ CanSubstitute: options.CanSubstitute,
+ PossibleManifestFormats: options.PossibleManifestFormats,
+ RequiredCompression: options.RequiredCompression,
+ })
+ for _, candidate := range candidates {
+ var candidateRepo reference.Named
+ if !candidate.UnknownLocation {
+ var err error
+ candidateRepo, err = parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ if candidate.CompressionAlgorithm != nil {
+ logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
+ } else {
+ logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
+ }
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
+ // (the "from" parameter); in that case we might try to use these candidates as well.
+ //
+ // OTOH that would mean we can’t do the “blobExists” check, and if there is no match
+ // we could get an upload request that we would have to cancel.
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ } else {
+ if candidate.CompressionAlgorithm != nil {
+ logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
+ } else {
+ logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
+ }
+ // This digest is a known variant of this blob but we don’t
+ // have a recorded location in this registry, let’s try looking
+ // for it in the current repo.
+ candidateRepo = reference.TrimNamed(d.ref.ref)
}
- if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ if originalCandidateKnownToBeMissing &&
+ candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}
@@ -335,8 +407,9 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
- remoteName: reference.Path(candidateRepo),
- actions: "pull",
+ resourceType: "repository",
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
@@ -361,18 +434,18 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
}
- bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
-
- compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
- if err != nil {
- logrus.Debugf("... Failed: %v", err)
- continue
- }
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
- return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
+ return true, private.ReusedBlob{
+ Digest: candidate.Digest,
+ Size: size,
+ CompressionOperation: candidate.CompressionOperation,
+ CompressionAlgorithm: candidate.CompressionAlgorithm,
+ CompressionAnnotations: candidate.CompressionAnnotations,
+ }, nil
}
- return false, types.BlobInfo{}, nil
+ return false, private.ReusedBlob{}, nil
}
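
Schematically, callers in the copy pipeline are expected to try reuse first and only stream bytes on a miss; the following same-package pseudo-usage illustrates the contract between the two entry points above (the plumbing names are stand-ins, not the actual copy code):

// copyLayer is illustrative only: reuse if possible, otherwise upload.
func copyLayer(ctx context.Context, dest *dockerImageDestination, srcReader io.Reader,
	info types.BlobInfo, opts private.TryReusingBlobOptions) (digest.Digest, error) {
	reused, blob, err := dest.TryReusingBlobWithOptions(ctx, info, opts)
	if err != nil {
		return "", err
	}
	if reused {
		return blob.Digest, nil // no bytes transferred
	}
	uploaded, err := dest.PutBlobWithOptions(ctx, srcReader, info, private.PutBlobOptions{Cache: opts.Cache})
	if err != nil {
		return "", err
	}
	return uploaded.Digest, nil
}
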
// PutManifest writes manifest to the destination.
@@ -383,23 +456,32 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- refTail := ""
- if instanceDigest != nil {
+ var refTail string
+ // If d.ref.isUnknownDigest=true, then we push without a tag, so get the
+ // digest that will be used
+ if d.ref.isUnknownDigest {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ refTail = digest.String()
+ } else if instanceDigest != nil {
// If the instanceDigest is provided, then use it as the refTail, because the reference,
// whether it includes a tag or a digest, refers to the list as a whole, and not this
// particular instance.
refTail = instanceDigest.String()
// Double-check that the manifest we've been given matches the digest we've been given.
+ // This also validates the format of instanceDigest.
matches, err := manifest.MatchesDigest(m, *instanceDigest)
if err != nil {
- return errors.Wrapf(err, "error digesting manifest in PutManifest")
+ return fmt.Errorf("digesting manifest in PutManifest: %w", err)
}
if !matches {
manifestDigest, merr := manifest.Digest(m)
if merr != nil {
- return errors.Wrapf(err, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
}
- return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
}
} else {
// Compute the digest of the main manifest, or the list if it's a list, so that we
@@ -417,25 +499,43 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
}
- path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+ return d.uploadManifest(ctx, m, refTail)
+}
+
+// uploadManifest writes manifest to tagOrDigest.
+func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error {
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
- res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil {
return err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
- if isManifestInvalidError(errors.Cause(err)) {
+ rawErr := registryHTTPResponseToError(res)
+ err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
+ if isManifestInvalidError(rawErr) {
err = types.ManifestTypeRejectedError{Err: err}
}
return err
}
+ // An HTTP server may not be a registry at all, and just return 200 OK to everything
+ // (in particular that can fairly easily happen after tearing down a website and
+ // replacing it with a global 302 redirect to a new website, completely ignoring the
+ // path in the request); in that case we could “succeed” uploading a whole image.
+ // With docker/distribution we could rely on a Docker-Content-Digest header being present
+ // (because docker/distribution/registry/client has been failing uploads if it was missing),
+ // but that has been defined as explicitly optional by
+ // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
+ // So, just note the missing header in a debug log.
+ if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
+ logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
+ }
return nil
}
@@ -445,15 +545,10 @@ func successStatus(status int) bool {
return status >= 200 && status <= 399
}
-// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
- errors, ok := err.(errcode.Errors)
- if !ok || len(errors) == 0 {
- return false
- }
- err = errors[0]
- ec, ok := err.(errcode.ErrorCoder)
- if !ok {
+ var ec errcode.ErrorCoder
+ if ok := errors.As(err, &ec); !ok {
return false
}
@@ -476,38 +571,63 @@ func isManifestInvalidError(err error) bool {
}
}
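
Much of this patch migrates github.com/pkg/errors to stdlib wrapping; the property isManifestInvalidError depends on is that fmt.Errorf with %w keeps the wrapped error reachable by errors.As. A minimal illustration:

package main

import (
	"errors"
	"fmt"
)

// errorCoder stands in for errcode.ErrorCoder from docker/distribution.
type errorCoder struct{ code string }

func (e errorCoder) Error() string { return e.code }

func main() {
	base := errorCoder{code: "MANIFEST_INVALID"}
	wrapped := fmt.Errorf("uploading manifest: %w", base) // %w preserves the chain
	var ec errorCoder
	fmt.Println(errors.As(wrapped, &ec), ec.code) // true MANIFEST_INVALID
}
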
-// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
-// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- // Do not fail if we don’t really need to support signatures.
- if len(signatures) == 0 {
- return nil
- }
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest == nil {
- if d.manifestDigest.String() == "" {
+ if d.manifestDigest == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
- return errors.Errorf("Unknown manifest digest, can't add signatures")
+ return errors.New("Unknown manifest digest, can't add signatures")
}
instanceDigest = &d.manifestDigest
}
- if err := d.c.detectProperties(ctx); err != nil {
- return err
+ sigstoreSignatures := []signature.Sigstore{}
+ otherSignatures := []signature.Signature{}
+ for _, sig := range signatures {
+ if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+ sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+ } else {
+ otherSignatures = append(otherSignatures, sig)
+ }
}
- switch {
- case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
- case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures, *instanceDigest)
- default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+
+ // Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+ // instead, but that would probably be rather surprising.
+ // FIXME: So should we enable sigstore attachments in all cases? Or write in all cases, but opt-in to read?
+
+ if len(sigstoreSignatures) != 0 {
+ if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ }
+
+ if len(otherSignatures) != 0 {
+ if err := d.c.detectProperties(ctx); err != nil {
+ return err
+ }
+ switch {
+ case d.c.supportsSignatures:
+ if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ case d.c.signatureBase != nil:
+ if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
}
+
+ return nil
}
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
+// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
// which is not nil, for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -518,11 +638,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
- err := d.putOneSignature(url, signature)
+ sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
if err != nil {
return err
}
+ if err := d.putOneSignature(sigURL, signature); err != nil {
+ return err
+ }
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
@@ -530,8 +652,11 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
- missing, err := d.c.deleteOneSignature(url)
+ sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+ if err != nil {
+ return err
+ }
+ missing, err := d.c.deleteOneSignature(sigURL)
if err != nil {
return err
}
@@ -543,52 +668,190 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
return nil
}
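
For reference, the per-signature layout computed by lookasideStorageURL (see docs/signature-protocols.md) appends the manifest digest and a 1-based index to the configured base; a sketch of the expected shape, assuming base already carries the repository path:

// lookasideURL sketches <base>@<algo>=<hex>/signature-<i+1>, e.g.
// https://example.com/sigstore/library/busybox@sha256=ab12.../signature-1
func lookasideURL(base *url.URL, manifestDigest digest.Digest, index int) *url.URL {
	u := *base // copy so the caller's URL is untouched
	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d",
		u.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
	return &u
}
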
-// putOneSignature stores one signature to url.
+// putOneSignature stores sig to sigURL.
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
- switch url.Scheme {
+func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error {
+ switch sigURL.Scheme {
case "file":
- logrus.Debugf("Writing to %s", url.Path)
- err := os.MkdirAll(filepath.Dir(url.Path), 0755)
+ logrus.Debugf("Writing to %s", sigURL.Path)
+ err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755)
+ if err != nil {
+ return err
+ }
+ blob, err := signature.Blob(sig)
if err != nil {
return err
}
- err = ioutil.WriteFile(url.Path, signature, 0644)
+ err = os.WriteFile(sigURL.Path, blob, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
- return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
default:
- return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+ return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted())
+ }
+}
+
+func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error {
+ if !d.c.useSigstoreAttachments {
+ return errors.New("writing sigstore attachments is disabled by configuration")
+ }
+
+ ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
+ if err != nil {
+ return err
+ }
+ var ociConfig imgspecv1.Image // Most fields empty by default
+ if ociManifest == nil {
+ ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Digest: "", // We will fill this in later.
+ Size: 0,
+ }, nil)
+ ociConfig.RootFS.Type = "layers"
+ } else {
+ logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize,
+ none.NoCache)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(configBlob, &ociConfig); err != nil {
+ return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(),
+ d.ref.ref.Name(), err)
+ }
+ }
+
+ // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
+ ociManifest.Layers = slices.Clone(ociManifest.Layers)
+ // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
+ // the slice in the original object (or in a newly allocated object).
+ for _, sig := range signatures {
+ mimeType := sig.UntrustedMIMEType()
+ payloadBlob := sig.UntrustedPayload()
+ annotations := sig.UntrustedAnnotations()
+
+ alreadyOnRegistry := false
+ for _, layer := range ociManifest.Layers {
+ if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) {
+ logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String())
+ alreadyOnRegistry = true
+ break
+ }
+ }
+ if alreadyOnRegistry {
+ continue
+ }
+
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: false,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ sigDesc.Annotations = annotations
+ ociManifest.Layers = append(ociManifest.Layers, sigDesc)
+ ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest)
+ logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String())
+ }
+
+ configBlob, err := json.Marshal(ociConfig)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading updated sigstore attachment config")
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: true,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
}
+ ociManifest.Config = configDesc
+
+ manifestBlob, err := ociManifest.Serialize()
+ if err != nil {
+ return err
+ }
+ attachmentTag, err := sigstoreAttachmentTag(manifestDigest)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading sigstore attachment manifest")
+ return d.uploadManifest(ctx, manifestBlob, attachmentTag)
+}
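
The attachment manifest produced above is pushed to a tag derived from the image digest; assuming sigstoreAttachmentTag follows the cosign convention (the digest's ':' replaced by '-', plus a '.sig' suffix), it behaves like this sketch:

// attachmentTag maps e.g. sha256:abcd... to the tag "sha256-abcd....sig".
func attachmentTag(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil { // refuse digests that would make a malformed tag
		return "", err
	}
	return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
}
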
+
+func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
+ payloadBlob []byte, annotations map[string]string) bool {
+ if layer.MediaType != mimeType ||
+ layer.Size != int64(len(payloadBlob)) ||
+ // This is not quite correct, we should use the layer’s digest algorithm.
+ // But right now we don’t want to deal with corner cases like bad digest formats
+ // or unavailable algorithms; in the worst case we end up with duplicate signature
+ // entries.
+ layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
+ !maps.Equal(layer.Annotations, annotations) {
+ return false
+ }
+ return true
+}
+
+// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
+// OCI descriptor.
+func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
+ blobDigest := digest.FromBytes(contents)
+ info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
+ types.BlobInfo{
+ Digest: blobDigest,
+ Size: int64(len(contents)),
+ MediaType: mimeType,
+ }, options)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err)
+ }
+ return imgspecv1.Descriptor{
+ MediaType: mimeType,
+ Digest: info.Digest,
+ Size: info.Size,
+ }, nil
}
-// deleteOneSignature deletes a signature from url, if it exists.
+// deleteOneSignature deletes a signature from sigURL, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
- switch url.Scheme {
+func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) {
+ switch sigURL.Scheme {
case "file":
- logrus.Debugf("Deleting %s", url.Path)
- err := os.Remove(url.Path)
+ logrus.Debugf("Deleting %s", sigURL.Path)
+ err := os.Remove(sigURL.Path)
if err != nil && os.IsNotExist(err) {
return true, nil
}
return false, err
case "http", "https":
- return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
default:
- return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+ return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted())
}
}
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
// for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
@@ -602,17 +865,22 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
if err != nil {
return err
}
- existingSigNames := map[string]struct{}{}
+ existingSigNames := set.New[string]()
for _, sig := range existingSignatures.Signatures {
- existingSigNames[sig.Name] = struct{}{}
+ existingSigNames.Add(sig.Name)
}
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range existingSignatures.Signatures {
- if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
+ return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
+ }) {
+ continue
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
@@ -621,10 +889,10 @@ sigExists:
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
- return errors.Wrapf(err, "Error generating random signature len %d", n)
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
+ if !existingSigNames.Contains(signatureName) {
break
}
}
@@ -639,29 +907,26 @@ sigExists:
return err
}
+ // manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above.
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
- res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
- body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize)
- if err == nil {
- logrus.Debugf("Error body %s", string(body))
- }
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
- return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
+ return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res))
}
}
return nil
}
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
return nil
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 6916b7dad..6e44ce096 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -1,30 +1,49 @@
package docker
import (
+ "bytes"
"context"
+ "encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "math"
"mime"
+ "mime/multipart"
"net/http"
"net/url"
"os"
- "strconv"
+ "os/exec"
"strings"
+ "sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server,
+// even if it were broken or malicious and it continued serving an enormous number of items.
+const maxLookasideSignatures = 128
+
type dockerImageSource struct {
- logicalRef dockerReference // The reference the user requested.
- physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.ImplementsGetBlobAt
+
+ logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest
+ physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest
c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
@@ -33,10 +52,19 @@ type dockerImageSource struct {
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
+// The caller must ensure !ref.isUnknownDigest.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ if ref.isUnknownDigest {
+ return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport())
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
- return nil, errors.Wrapf(err, "error loading registries configuration")
+ return nil, fmt.Errorf("loading registries configuration: %w", err)
}
if registry == nil {
// No configuration was found for the provided reference, so use the
@@ -69,8 +97,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
} else {
logrus.Debugf("Trying to access %q", pullSource.Reference)
}
- logrus.Debugf("Trying to access %q", pullSource.Reference)
- s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig)
if err == nil {
return s, nil
}
@@ -89,20 +116,21 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// Don’t just build a string, try to preserve the typed error.
primary := &attempts[len(attempts)-1]
extras := []string{}
- for i := 0; i < len(attempts)-1; i++ {
+ for _, attempt := range attempts[:len(attempts)-1] {
// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
// The paired [] at least have some chance of being unambiguous.
- extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+ extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err))
}
- return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
+ return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
}
}
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
- physicalRef, err := newReference(pullSource.Reference)
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
+ registryConfig *registryConfiguration) (*dockerImageSource, error) {
+ physicalRef, err := newReference(pullSource.Reference, false)
if err != nil {
return nil, err
}
@@ -116,21 +144,55 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
endpointSys = &copy
}
- client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
s := &dockerImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+
logicalRef: logicalRef,
physicalRef: physicalRef,
c: client,
}
+ s.Compat = impl.AddCompat(s)
if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ client.Close()
return nil, err
}
+
+ if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
+ acf := map[string]struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ IdentityToken string `json:"identityToken,omitempty"`
+ }{
+ physicalRef.ref.String(): {
+ Username: client.auth.Username,
+ Password: client.auth.Password,
+ IdentityToken: client.auth.IdentityToken,
+ },
+ }
+ acfD, err := json.Marshal(acf)
+ if err != nil {
+ logrus.Warnf("failed to marshal auth config: %v", err)
+ } else {
+ cmd := exec.Command(h)
+ cmd.Stdin = bytes.NewReader(acfD)
+ if err := cmd.Run(); err != nil {
+ var stderr string
+ if ee, ok := err.(*exec.ExitError); ok {
+ stderr = string(ee.Stderr)
+ }
+ logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err)
+ }
+ }
+ }
return s, nil
}
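
The additional-layer-store-auth-helper invoked above receives a single JSON object on stdin, keyed by the image reference, with the credential fields declared in the anonymous struct; a runnable sketch of the payload shape (values illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	acf := map[string]struct {
		Username      string `json:"username,omitempty"`
		Password      string `json:"password,omitempty"`
		IdentityToken string `json:"identityToken,omitempty"`
	}{
		"registry.example.com/library/busybox:latest": {Username: "user", Password: "secret"},
	}
	out, _ := json.Marshal(acf)
	fmt.Println(string(out))
	// {"registry.example.com/library/busybox:latest":{"username":"user","password":"secret"}}
}
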
@@ -142,19 +204,7 @@ func (s *dockerImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *dockerImageSource) Close() error {
- return nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
+ return s.c.Close()
}
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
@@ -176,6 +226,9 @@ func simplifyContentType(contentType string) string {
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
+ if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
+ return nil, "", err
+ }
return s.fetchManifest(ctx, instanceDigest.String())
}
err := s.ensureManifestIsLoaded(ctx)
@@ -185,26 +238,10 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
return s.cachedManifest, s.cachedManifestMIMEType, nil
}
+// fetchManifest fetches a manifest for tagOrDigest.
+// The caller is responsible for ensuring tagOrDigest uses the expected format.
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
- path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
- headers := map[string][]string{
- "Accept": manifest.DefaultRequestedManifestMIMETypes,
- }
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
- if err != nil {
- return nil, "", err
- }
- logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
- defer res.Body.Close()
- if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
- }
-
- manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
- if err != nil {
- return nil, "", err
- }
- return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+ return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
}
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -234,86 +271,214 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
}
-func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
- var (
- resp *http.Response
- err error
- )
- if len(urls) == 0 {
- return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
- }
- for _, url := range urls {
- // NOTE: we must not authenticate on additional URLs as those
- // can be abused to leak credentials or tokens. Please
- // refer to CVE-2020-15157 for more information.
- resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
- if err == nil {
- if resp.StatusCode != http.StatusOK {
- err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
- logrus.Debug(err)
- resp.Body.Close()
- continue
+// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
+ defer close(streams)
+ defer close(errs)
+ currentOffset := uint64(0)
+
+ body = makeBufferedNetworkReader(body, 64, 16384)
+ defer body.Close()
+ for _, c := range chunks {
+ if c.Offset != currentOffset {
+ if c.Offset < currentOffset {
+ errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
+ break
}
- break
+ toSkip := c.Offset - currentOffset
+ if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+ errs <- err
+ break
+ }
+ currentOffset += toSkip
+ }
+ var reader io.Reader
+ if c.Length == math.MaxUint64 {
+ reader = body
+ } else {
+ reader = io.LimitReader(body, int64(c.Length))
}
+ s := signalCloseReader{
+ closed: make(chan struct{}),
+ stream: io.NopCloser(reader),
+ consumeStream: true,
+ }
+ streams <- s
+
+ // Wait until the stream is closed before going to the next chunk
+ <-s.closed
+ currentOffset += c.Length
}
- if err != nil {
- return nil, 0, err
+}
+
+// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan.
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
+ defer close(streams)
+ defer close(errs)
+ if !strings.HasPrefix(mediaType, "multipart/") {
+ streams <- body
+ return
+ }
+ boundary, found := params["boundary"]
+ if !found {
+ errs <- errors.New("could not find boundary")
+ body.Close()
+ return
+ }
+ buffered := makeBufferedNetworkReader(body, 64, 16384)
+ defer buffered.Close()
+ mr := multipart.NewReader(buffered, boundary)
+ parts := 0
+ for {
+ p, err := mr.NextPart()
+ if err != nil {
+ if err != io.EOF {
+ errs <- err
+ }
+ if parts != len(chunks) {
+ errs <- errors.New("invalid number of chunks returned by the server")
+ }
+ return
+ }
+ s := signalCloseReader{
+ closed: make(chan struct{}),
+ stream: p,
+ }
+ streams <- s
+ // NextPart() cannot be called while the current part
+ // is being read, so wait until it is closed
+ <-s.closed
+ parts++
}
- return resp.Body, getBlobSize(resp), nil
}
-func getBlobSize(resp *http.Response) int64 {
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
+
+func parseMediaType(contentType string) (string, map[string]string, error) {
+ mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
- size = -1
+ if err == mime.ErrInvalidMediaParameter {
+ // CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary
+ // param, let's handle it here.
+ matches := multipartByteRangesRe.FindStringSubmatch(contentType)
+ if len(matches) == 2 {
+ mediaType = "multipart/byteranges"
+ params = map[string]string{
+ "boundary": matches[1],
+ }
+ err = nil
+ }
+ }
+ if err != nil {
+ return "", nil, err
+ }
}
- return size
+ return mediaType, params, err
}
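
Illustrative same-package usage of parseMediaType, showing both the well-formed path and the CloudFront fallback (inputs are made up):

func exampleParseMediaType() {
	// Well-formed header: handled entirely by mime.ParseMediaType.
	mt, params, err := parseMediaType(`multipart/byteranges; boundary="b1"`)
	fmt.Println(mt, params["boundary"], err) // multipart/byteranges b1 <nil>

	// CloudFront-style header with an unquoted ":" in the boundary:
	// mime.ParseMediaType fails, and the fallback regexp recovers the value.
	mt, params, err = parseMediaType("multipart/byteranges; boundary=CloudFront:12345")
	fmt.Println(mt, params["boundary"], err) // multipart/byteranges CloudFront:12345 <nil>
}
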
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ headers := make(map[string][]string)
+
+ rangeVals := make([]string, 0, len(chunks))
+ lastFound := false
+ for _, c := range chunks {
+ if lastFound {
+ return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk")
+ }
+ // If the Length is set to math.MaxUint64, then request everything from the offset to the end of the blob.
+ if c.Length == math.MaxUint64 {
+ lastFound = true
+ rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset))
+ } else {
+ rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+ }
+ }
+
+ headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
- return s.getExternalBlob(ctx, info.URLs)
+ return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
}
+ if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
+ return nil, nil, err
+ }
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
- return nil, 0, err
+ return nil, nil, err
}
- if err := httpResponseToError(res, "Error fetching blob"); err != nil {
+
+ switch res.StatusCode {
+ case http.StatusOK:
+ // If the server replied with a 200 status code, convert the full body response to a series of
+ // streams as would have been done for a 206 response.
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
+ return streams, errs, nil
+ case http.StatusPartialContent:
+ mediaType, params, err := parseMediaType(res.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+
+ go handle206Response(streams, errs, res.Body, chunks, mediaType, params)
+ return streams, errs, nil
+ case http.StatusBadRequest:
+ res.Body.Close()
+ return nil, nil, private.BadPartialRequestError{Status: res.Status}
+ default:
+ err := registryHTTPResponseToError(res)
res.Body.Close()
- return nil, 0, err
+ return nil, nil, fmt.Errorf("fetching partial blob: %w", err)
}
- cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
- return res.Body, getBlobSize(res), nil
}
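To illustrate the contract documented on GetBlobAt: chunks `{Offset: 0, Length: 100}` and `{Offset: 100, Length: math.MaxUint64}` produce a single `Range: bytes=0-99,100-` request header, and the caller then drains the two returned channels. A minimal consumer sketch, assuming (as the producing goroutines here do) that both channels are closed when the producer finishes; all names are illustrative:

```go
package blobconsume

import "io"

// consumeChunks drains a (streams, errs) pair in the shape GetBlobAt returns.
// It selects over both channels: each reader must be fully read and closed
// before the producer will send the next one, and the error channel delivers
// at most one error. Assumes the producer closes both channels on return.
func consumeChunks(streams chan io.ReadCloser, errs chan error) ([][]byte, error) {
	var parts [][]byte
	for {
		select {
		case r, ok := <-streams:
			if !ok { // streams closed: all parts delivered
				return parts, nil
			}
			data, err := io.ReadAll(r)
			r.Close() // Close() unblocks the producer for the next part
			if err != nil {
				return nil, err
			}
			parts = append(parts, data)
		case err, ok := <-errs:
			if !ok { // errs closed without an error; keep draining streams
				errs = nil // a nil channel blocks forever in select
				continue
			}
			return nil, err
		}
	}
}
```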
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.c.getBlob(ctx, s.physicalRef, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if err := s.c.detectProperties(ctx); err != nil {
return nil, err
}
+ var res []signature.Signature
switch {
case s.c.supportsSignatures:
- return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil {
+ return nil, err
+ }
case s.c.signatureBase != nil:
- return s.getSignaturesFromLookaside(ctx, instanceDigest)
+ if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
+ return nil, err
+ }
default:
- return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+
+ if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
+ return nil, err
}
+ return res, nil
}
// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
@@ -334,97 +499,169 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
return manifest.Digest(s.cachedManifest)
}
-// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil, storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
- return nil, err
+ return err
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
- signatures := [][]byte{}
for i := 0; ; i++ {
- url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
- signature, missing, err := s.getOneSignature(ctx, url)
+ if i >= maxLookasideSignatures {
+ return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
+ }
+
+ sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
if err != nil {
- return nil, err
+ return err
+ }
+ signature, missing, err := s.getOneSignature(ctx, sigURL)
+ if err != nil {
+ return err
}
if missing {
break
}
- signatures = append(signatures, signature)
+ *dest = append(*dest, signature)
}
- return signatures, nil
+ return nil
}
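A sketch of how the per-index lookaside URLs probed by this loop are derived, following the layout described in docs/signature-protocols.md (illustrative code, not the exact vendored helper): signature i for a manifest digest lives at `<base>@<algorithm>=<hex>/signature-<i+1>`.

```go
package lookaside

import (
	"fmt"
	"net/url"

	"github.com/opencontainers/go-digest"
)

// signatureURL sketches the per-index lookaside URL derivation: index 0 maps
// to ".../signature-1", index 1 to ".../signature-2", and so on, under a path
// segment naming the manifest digest.
func signatureURL(base *url.URL, manifestDigest digest.Digest, index int) *url.URL {
	sigURL := *base // shallow copy; only the path is rewritten
	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d",
		sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
	return &sigURL
}
```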
-// getOneSignature downloads one signature from url.
-// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
+// If it successfully determines that the signature does not exist, returns (nil, true, nil).
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
- switch url.Scheme {
+func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) {
+ switch sigURL.Scheme {
case "file":
- logrus.Debugf("Reading %s", url.Path)
- sig, err := ioutil.ReadFile(url.Path)
+ logrus.Debugf("Reading %s", sigURL.Path)
+ sigBlob, err := os.ReadFile(sigURL.Path)
if err != nil {
if os.IsNotExist(err) {
return nil, true, nil
}
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err)
+ }
return sig, false, nil
case "http", "https":
- logrus.Debugf("GET %s", url)
- req, err := http.NewRequest("GET", url.String(), nil)
+ logrus.Debugf("GET %s", sigURL.Redacted())
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil)
if err != nil {
return nil, false, err
}
- req = req.WithContext(ctx)
res, err := s.c.client.Do(req)
if err != nil {
return nil, false, err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
+ logrus.Debugf("... got status 404, as expected = end of signatures")
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
- return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
+ return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
+ }
+
+ contentType := res.Header.Get("Content-Type")
+ if mimeType := simplifyContentType(contentType); mimeType == "text/html" {
+ logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType)
+ // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type.
+ // If the content really is HTML, it’s going to fail in signature.FromBlob.
}
- sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
+
+ sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
if err != nil {
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err)
+ }
return sig, false, nil
default:
- return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
+ return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted())
}
}
-// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
- return nil, err
+ return err
}
parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
if err != nil {
- return nil, err
+ return err
}
- var sigs [][]byte
for _, sig := range parsedBody.Signatures {
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
+ *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return nil
+}
+
+// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
+ if !s.c.useSigstoreAttachments {
+ logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
+ return nil
+ }
+
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return err
+ }
+
+ ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
+ if err != nil {
+ return err
+ }
+ if ociManifest == nil {
+ return nil
+ }
+
+ logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
+ for layerIndex, layer := range ociManifest.Layers {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
+ none.NoCache)
+ if err != nil {
+ return err
}
+ *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
}
- return sigs, nil
+ return nil
}
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
+ if ref.isUnknownDigest {
+ return fmt.Errorf("Docker reference without a tag or digest cannot be deleted")
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return err
+ }
// docker/distribution does not document what action should be used for deleting images.
//
// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
@@ -432,10 +669,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
//
// We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
- c, err := newDockerClientFromRef(sys, ref, true, "*")
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*")
if err != nil {
return err
}
+ defer c.Close()
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
@@ -445,50 +683,46 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
- get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
+ get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
defer get.Body.Close()
- manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
- if err != nil {
- return err
- }
switch get.StatusCode {
case http.StatusOK:
case http.StatusNotFound:
- return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
default:
- return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get))
}
-
- digest := get.Header.Get("Docker-Content-Digest")
- deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
-
- // When retrieving the digest from a registry >= 2.3 use the following header:
- // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
- delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+ manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
if err != nil {
return err
}
- defer delete.Body.Close()
- body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize)
+ manifestDigest, err := manifest.Digest(manifestBody)
if err != nil {
- return err
- }
- if delete.StatusCode != http.StatusAccepted {
- return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+ return fmt.Errorf("computing manifest digest: %w", err)
}
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)
- manifestDigest, err := manifest.Digest(manifestBody)
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
+ defer delete.Body.Close()
+ if delete.StatusCode != http.StatusAccepted {
+ return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete))
+ }
for i := 0; ; i++ {
- url := signatureStorageURL(c.signatureBase, manifestDigest, i)
- missing, err := c.deleteOneSignature(url)
+ sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+ if err != nil {
+ return err
+ }
+ missing, err := c.deleteOneSignature(sigURL)
if err != nil {
return err
}
@@ -499,3 +733,127 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return nil
}
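Summarizing the flow: GET the manifest by tag, digest the returned body itself (the diff stops trusting the Docker-Content-Digest header), DELETE by that digest, then remove lookaside signatures one index at a time. A bare net/http sketch of the first two steps, with hypothetical names, no auth, and plain digest.Canonical where the vendored code uses manifest.Digest (which additionally special-cases schema1 payloads):

```go
package registrydelete

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"github.com/opencontainers/go-digest"
)

// resolveAndDelete sketches the registry interaction: fetch the manifest by
// tag, digest the body itself, then delete by digest (202 Accepted expected).
// The real code routes this through dockerClient.makeRequest, which handles
// authentication and TLS configuration.
func resolveAndDelete(ctx context.Context, client *http.Client, base, repo, tag string) error {
	getURL := fmt.Sprintf("%s/v2/%s/manifests/%s", base, repo, tag)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, getURL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	body, readErr := io.ReadAll(res.Body)
	res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("fetching manifest: status %d", res.StatusCode)
	}
	if readErr != nil {
		return readErr
	}
	dgst := digest.Canonical.FromBytes(body) // digest of the manifest bytes, not a trusted header

	delURL := fmt.Sprintf("%s/v2/%s/manifests/%s", base, repo, dgst)
	req, err = http.NewRequestWithContext(ctx, http.MethodDelete, delURL, nil)
	if err != nil {
		return err
	}
	res, err = client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusAccepted {
		return fmt.Errorf("deleting manifest: status %d", res.StatusCode)
	}
	return nil
}
```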
+
+type bufferedNetworkReaderBuffer struct {
+ data []byte
+ len int
+ consumed int
+ err error
+}
+
+type bufferedNetworkReader struct {
+ stream io.ReadCloser
+ emptyBuffer chan *bufferedNetworkReaderBuffer
+ readyBuffer chan *bufferedNetworkReaderBuffer
+ terminate chan bool
+ current *bufferedNetworkReaderBuffer
+ mutex sync.Mutex
+ gotEOF bool
+}
+
+// handleBufferedNetworkReader runs in a goroutine
+func handleBufferedNetworkReader(br *bufferedNetworkReader) {
+ defer close(br.readyBuffer)
+ for {
+ select {
+ case b := <-br.emptyBuffer:
+ b.len, b.err = br.stream.Read(b.data)
+ br.readyBuffer <- b
+ if b.err != nil {
+ return
+ }
+ case <-br.terminate:
+ return
+ }
+ }
+}
+
+func (n *bufferedNetworkReader) Close() error {
+ close(n.terminate)
+ close(n.emptyBuffer)
+ return n.stream.Close()
+}
+
+func (n *bufferedNetworkReader) read(p []byte) (int, error) {
+ if n.current != nil {
+ copied := copy(p, n.current.data[n.current.consumed:n.current.len])
+ n.current.consumed += copied
+ if n.current.consumed == n.current.len {
+ n.emptyBuffer <- n.current
+ n.current = nil
+ }
+ if copied > 0 {
+ return copied, nil
+ }
+ }
+ if n.gotEOF {
+ return 0, io.EOF
+ }
+
+ var b *bufferedNetworkReaderBuffer
+
+ select {
+ case b = <-n.readyBuffer:
+ if b.err != nil {
+ if b.err != io.EOF {
+ return b.len, b.err
+ }
+ n.gotEOF = true
+ }
+ b.consumed = 0
+ n.current = b
+ return n.read(p)
+ case <-n.terminate:
+ return 0, io.EOF
+ }
+}
+
+func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
+ n.mutex.Lock()
+ defer n.mutex.Unlock()
+
+ return n.read(p)
+}
+
+func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader {
+ br := bufferedNetworkReader{
+ stream: stream,
+ emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ terminate: make(chan bool),
+ }
+
+ go func() {
+ handleBufferedNetworkReader(&br)
+ }()
+
+ for range nBuffers {
+ b := bufferedNetworkReaderBuffer{
+ data: make([]byte, bufferSize),
+ }
+ br.emptyBuffer <- &b
+ }
+
+ return &br
+}
+
+type signalCloseReader struct {
+ closed chan struct{}
+ stream io.ReadCloser
+ consumeStream bool
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+ return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+ defer close(s.closed)
+ if s.consumeStream {
+ if _, err := io.Copy(io.Discard, s.stream); err != nil {
+ s.stream.Close()
+ return err
+ }
+ }
+ return s.stream.Close()
+}
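A self-contained toy version of the signalCloseReader handshake: the producer hands a reader to the consumer and blocks until Close() is called, which is what lets handle206Response advance its multipart.Reader only after the current part has been fully dealt with. Names here are illustrative; the real producer also reports chunk-count mismatches on the error channel.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// part mirrors signalCloseReader: Close() signals the producer that the
// consumer is done with this reader, so the producer may reuse the
// underlying stream (e.g. advance a multipart.Reader to the next part).
type part struct {
	closed chan struct{}
	stream io.ReadCloser
}

func (p part) Read(b []byte) (int, error) { return p.stream.Read(b) }

func (p part) Close() error {
	defer close(p.closed) // wake up the producer blocked in the goroutine below
	return p.stream.Close()
}

func main() {
	streams := make(chan io.ReadCloser)

	go func() { // producer
		defer close(streams)
		for _, s := range []string{"first part", "second part"} {
			p := part{closed: make(chan struct{}), stream: io.NopCloser(strings.NewReader(s))}
			streams <- p
			<-p.closed // block until the consumer calls Close()
		}
	}()

	for r := range streams { // consumer
		data, _ := io.ReadAll(r)
		r.Close()
		fmt.Println(string(data))
	}
}
```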
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
index 8b8e57968..c10463a43 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -2,6 +2,7 @@ package docker
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -9,14 +10,18 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
+// UnknownDigestSuffix can be appended to a reference when the caller
+// wants to push an image without a tag or digest.
+// NewReferenceUnknownDigest() is called when this const is detected.
+const UnknownDigestSuffix = "@@unknown-digest@@"
+
func init() {
transports.Register(Transport)
}
-// Transport is an ImageTransport for Docker registry-hosted images.
+// Transport is an ImageTransport for container registry-hosted images.
var Transport = dockerTransport{}
type dockerTransport struct{}
@@ -43,31 +48,51 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
// dockerReference is an ImageReference for Docker images.
type dockerReference struct {
- ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
+ isUnknownDigest bool
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
- if !strings.HasPrefix(refString, "//") {
- return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+ refString, ok := strings.CutPrefix(refString, "//")
+ if !ok {
+ return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
- ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+ refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix)
+ ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
return nil, err
}
+
+ if unknownDigest {
+ if !reference.IsNameOnly(ref) {
+ return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
+ }
+ return NewReferenceUnknownDigest(ref)
+ }
+
ref = reference.TagNameOnly(ref)
return NewReference(ref)
}
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
- return newReference(ref)
+ return newReference(ref, false)
+}
+
+// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting
+// a tag on the registry. The reference must satisfy reference.IsNameOnly()
+func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) {
+ return newReference(ref, true)
}
// newReference returns a dockerReference for a named reference.
-func newReference(ref reference.Named) (dockerReference, error) {
- if reference.IsNameOnly(ref) {
- return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) {
+ if reference.IsNameOnly(ref) && !unknownDigest {
+ return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref))
+ }
+ if !reference.IsNameOnly(ref) && unknownDigest {
+ return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -77,11 +102,12 @@ func newReference(ref reference.Named) (dockerReference, error) {
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported")
}
return dockerReference{
- ref: ref,
+ ref: ref,
+ isUnknownDigest: unknownDigest,
}, nil
}
@@ -92,10 +118,14 @@ func (ref dockerReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
- return "//" + reference.FamiliarString(ref.ref)
+ famString := "//" + reference.FamiliarString(ref.ref)
+ if ref.isUnknownDigest {
+ return famString + UnknownDigestSuffix
+ }
+ return famString
}
// DockerReference returns a Docker reference associated with this reference
@@ -113,6 +143,9 @@ func (ref dockerReference) DockerReference() reference.Named {
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dockerReference) PolicyConfigurationIdentity() string {
+ if ref.isUnknownDigest {
+ return ref.ref.Name()
+ }
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
@@ -126,7 +159,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dockerReference) PolicyConfigurationNamespaces() []string {
- return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+ namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref)
+ if ref.isUnknownDigest {
+ if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() {
+ namespaces = namespaces[1:]
+ }
+ }
+ return namespaces
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
@@ -163,6 +202,10 @@ func (ref dockerReference) tagOrDigest() (string, error) {
if ref, ok := ref.ref.(reference.NamedTagged); ok {
return ref.Tag(), nil
}
+
+ if ref.isUnknownDigest {
+ return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+ }
// This should not happen, NewReference above refuses reference.IsNameOnly values.
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+ return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
index 5b5008af7..e749b5014 100644
--- a/vendor/github.com/containers/image/v5/docker/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -5,14 +5,15 @@ import (
"fmt"
"net/http"
- "github.com/docker/distribution/registry/client"
- perrors "github.com/pkg/errors"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/sirupsen/logrus"
)
var (
// ErrV1NotSupported is returned when we're trying to talk to a
// docker V1 registry.
- ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+ // Deprecated: The V1 container registry detection is no longer performed, so this error is never returned.
+ ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
// ErrTooManyRequests is returned when the status code returned is 429
ErrTooManyRequests = errors.New("too many requests to registry")
)
@@ -28,6 +29,7 @@ func (e ErrUnauthorizedForCredentials) Error() string {
// httpResponseToError translates the http.Response into an error, possibly prefixing it with the supplied context. It returns
// nil if the response is not considered an error.
+// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead.
func httpResponseToError(res *http.Response, context string) error {
switch res.StatusCode {
case http.StatusOK:
@@ -35,26 +37,66 @@ func httpResponseToError(res *http.Response, context string) error {
case http.StatusTooManyRequests:
return ErrTooManyRequests
case http.StatusUnauthorized:
- err := client.HandleErrorResponse(res)
+ err := registryHTTPResponseToError(res)
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
- context = context + ": "
+ context += ": "
}
- return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
+ return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
-// registry
+// registry.
+//
+// WARNING: The OCI distribution spec says
+// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is
+// JSON, it MUST use the errcode.Error structure.
+// So, callers should primarily decide based on HTTP StatusCode, not based on error type here.
func registryHTTPResponseToError(res *http.Response) error {
- errResponse := client.HandleErrorResponse(res)
- if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
+ err := handleErrorResponse(res)
+ // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.
+ if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 {
+ // The docker/distribution registry implementation almost never returns
+ // more than one error in the HTTP body; it seems there is only one
+ // possible instance, where the second error reports a cleanup failure
+ // we don't really care about.
+ //
+ // The only _common_ case where a multi-element error is returned is
+ // created by the handleErrorResponse parser when OAuth authorization fails:
+ // the first element contains errors from a WWW-Authenticate header, the second
+ // element contains errors from the response body.
+ //
+ // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized
+ // for invalid tokens, ErrorCodeDenied for permission denied with a valid token
+ // for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.)
+ //
+ // Also, docker/docker similarly only logs the other errors and returns the
+ // first one.
+ if len(errs) > 1 {
+ logrus.Debugf("Discarding non-primary errors:")
+ for _, err := range errs[1:] {
+ logrus.Debugf(" %s", err.Error())
+ }
+ }
+ err = errs[0]
+ }
+ switch e := err.(type) {
+ case *unexpectedHTTPResponseError:
response := string(e.Response)
if len(response) > 50 {
response = response[:50] + "..."
}
- errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+ // %.0w makes e visible to error.Unwrap() without including any text
+ err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
+ case errcode.Error:
+ // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually
+ // rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
+ if e.Message == e.Code.Message() {
+ // %.0w makes e visible to error.Unwrap() without including any text
+ err = fmt.Errorf("%s%.0w", e.Message, e)
+ }
}
- return errResponse
+ return err
}
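The `%.0w` idiom used above deserves a worked example: a precision of zero prints no characters for the wrapped error, yet the error is still recorded for errors.Unwrap and errors.Is. A self-contained illustration:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("requested access to the resource is denied")
	// %.0w contributes no text but keeps base on the unwrap chain.
	err := fmt.Errorf("access denied%.0w", base)

	fmt.Println(err)                  // access denied
	fmt.Println(errors.Is(err, base)) // true
	fmt.Println(errors.Unwrap(err))   // requested access to the resource is denied
}
```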
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index 9559dfb56..f14234680 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -4,40 +4,69 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
- "os"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+// Destination is a partial implementation of private.ImageDestination for writing to a Writer.
type Destination struct {
- archive *Writer
- repoTags []reference.NamedTagged
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ archive *Writer
+ commitWithOptions func(ctx context.Context, options private.CommitOptions) error
+ repoTags []reference.NamedTagged
// Other state.
config []byte
sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination adding images to the specified Writer.
-func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+// commitWithOptions implements ImageDestination.CommitWithOptions.
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged,
+ commitWithOptions func(ctx context.Context, options private.CommitOptions) error) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
- return &Destination{
- archive: archive,
- repoTags: repoTags,
- sysCtx: sys,
- }
+ dest := &Destination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ },
+ DesiredLayerCompression: types.Decompress,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+ // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+ // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+ // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
+ NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"),
+
+ archive: archive,
+ commitWithOptions: commitWithOptions,
+ repoTags: repoTags,
+ sysCtx: sys,
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest
}
// AddRepoTags adds the specified tags to the destination's repoTags.
@@ -45,128 +74,78 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *Destination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *Destination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *Destination) HasThreadSafePutBlob() bool {
- // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
- // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
- // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
- if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+ if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
- streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
if err != nil {
- return types.BlobInfo{}, err
- }
- defer os.Remove(streamCopy.Name())
- defer streamCopy.Close()
-
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(streamCopy, tee)
- if err != nil {
- return types.BlobInfo{}, err
- }
- _, err = streamCopy.Seek(0, io.SeekStart)
- if err != nil {
- return types.BlobInfo{}, err
- }
- inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
- if inputInfo.Digest == "" {
- inputInfo.Digest = digester.Digest()
+ return private.UploadedBlob{}, err
}
+ defer cleanup()
stream = streamCopy
logrus.Debugf("... streaming done")
}
if err := d.archive.lock(); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
defer d.archive.unlock()
// Maybe the blob has been already sent
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
if ok {
- return reusedInfo, nil
+ return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
}
- if isConfig {
+ if options.IsConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+ return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
}
d.config = buf
- if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+ configPath, err := d.archive.configPath(inputInfo.Digest)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
}
} else {
- if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
- return types.BlobInfo{}, err
+ layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
+ return private.UploadedBlob{}, err
}
}
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
- return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+ return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
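streamdigest.ComputeBlobInfo replaces the hand-rolled tee visible in the removed lines; conceptually it spools the stream to a temporary file while hashing it, fills in the missing Size/Digest, and returns a rewound reader plus a cleanup function. A sketch of that concept with hypothetical names:

```go
package blobspool

import (
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

// spoolAndDigest sketches what streamdigest.ComputeBlobInfo does: copy the
// stream into a temporary file while hashing it, rewind, and report both the
// size and the digest so the caller can proceed with complete blob info.
func spoolAndDigest(stream io.Reader) (io.Reader, int64, digest.Digest, func(), error) {
	tmp, err := os.CreateTemp("", "blob-")
	if err != nil {
		return nil, -1, "", nil, err
	}
	cleanup := func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	digester := digest.Canonical.Digester()
	size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		cleanup()
		return nil, -1, "", nil, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		cleanup()
		return nil, -1, "", nil, err
	}
	return tmp, size, digester.Digest(), cleanup, nil
}
```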
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ return false, private.ReusedBlob{}, nil
+ }
if err := d.archive.lock(); err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
defer d.archive.unlock()
@@ -187,10 +166,10 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
- return errors.Wrap(err, "Error parsing manifest")
+ return fmt.Errorf("parsing manifest: %w", err)
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
- return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+ return errors.New("Unsupported manifest type, need a Docker schema 2 manifest")
}
if err := d.archive.lock(); err != nil {
@@ -205,15 +184,12 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}
-// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
-func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if instanceDigest != nil {
- return errors.Errorf(`Manifest lists are not supported for docker tar files`)
- }
- if len(signatures) != 0 {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
- }
- return nil
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *Destination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ // This indirection exists because impl.Compat expects all ImageDestinationInternalOnly methods
+ // to be implemented in one place.
+ return d.commitWithOptions(ctx, options)
}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
index 83de0c520..362657596 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -3,8 +3,9 @@ package tarfile
import (
"archive/tar"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path"
@@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
@@ -30,19 +30,23 @@ type Reader struct {
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
file, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "error opening file %q", path)
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
defer file.Close()
- // If the file is already not compressed we can just return the file itself
+ // If the file is seekable and not compressed, we can just return the file itself
// as a source. Otherwise we pass the stream to NewReaderFromStream.
- stream, isCompressed, err := compression.AutoDecompress(file)
- if err != nil {
- return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
- }
- defer stream.Close()
- if !isCompressed {
- return newReader(path, false)
+ var stream io.Reader = file
+ if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible
+ decompressed, isCompressed, err := compression.AutoDecompress(file)
+ if err != nil {
+ return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
+ }
+ defer decompressed.Close()
+ stream = decompressed
+ if !isCompressed {
+ return newReader(path, false)
+ }
}
return NewReaderFromStream(sys, stream)
}
@@ -53,9 +57,9 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
- tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+ tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
if err != nil {
- return nil, errors.Wrap(err, "error creating temporary file")
+ return nil, fmt.Errorf("creating temporary file: %w", err)
}
defer tarCopyFile.Close()
@@ -71,7 +75,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
- return nil, errors.Wrap(err, "Error auto-decompressing input")
+ return nil, fmt.Errorf("auto-decompressing input: %w", err)
}
defer uncompressedStream.Close()
@@ -80,7 +84,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
- return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
+ return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
}
succeeded = true
@@ -113,7 +117,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) {
return nil, err
}
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
- return nil, errors.Wrap(err, "Error decoding tar manifest.json")
+ return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
}
succeeded = true
@@ -137,7 +141,7 @@ func (r *Reader) Close() error {
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
switch {
case ref != nil && sourceIndex != -1:
- return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+ return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
ref.String(), sourceIndex)
case ref != nil:
@@ -146,25 +150,25 @@ func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int)
for tagIndex, tag := range r.Manifest[i].RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+ return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err)
}
if parsedTag.String() == refString {
return &r.Manifest[i], tagIndex, nil
}
}
}
- return nil, -1, errors.Errorf("Tag %#v not found", refString)
+ return nil, -1, fmt.Errorf("Tag %#v not found", refString)
case sourceIndex != -1:
if sourceIndex >= len(r.Manifest) {
- return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+ return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available",
sourceIndex, len(r.Manifest))
}
return &r.Manifest[sourceIndex], -1, nil
default:
if len(r.Manifest) != 1 {
- return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+ return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
}
return &r.Manifest[0], -1, nil
}
@@ -227,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if !header.FileInfo().Mode().IsRegular() {
- return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -258,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
- return nil, errors.Wrapf(err, "Error loading tar component %s", path)
+ return nil, fmt.Errorf("loading tar component %q: %w", path, err)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
index bd65ef844..3364e6c9f 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -5,23 +5,31 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path"
"sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
archive *Reader
closeArchive bool // .Close() the archive when the source is closed.
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
@@ -47,13 +55,20 @@ type layerInfo struct {
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
-func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
- return &Source{
+func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source {
+ s := &Source{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName),
+
archive: archive,
closeArchive: closeArchive,
ref: ref,
sourceIndex: sourceIndex,
}
+ s.Compat = impl.AddCompat(s)
+ return s
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@@ -80,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
+ return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err)
}
if parsedConfig.RootFS == nil {
- return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@@ -116,7 +131,7 @@ func (s *Source) TarManifest() []ManifestItem {
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
- return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[digest.Digest]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
@@ -129,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
- return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
@@ -164,15 +179,15 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
- return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
+ return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err)
}
defer uncompressedStream.Close()
uncompressedSize := h.Size
if isCompressed {
- uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+ uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
if err != nil {
- return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
+ return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err)
}
}
li.size = uncompressedSize
@@ -180,7 +195,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
}
if len(unknownLayerSizes) != 0 {
- return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
+ return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with better error reporting, if this ever happened in practice.
}
return knownLayers, nil
@@ -214,7 +229,7 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
- return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
Digest: diffID, // diffID is a digest of the uncompressed tarball
@@ -249,11 +264,6 @@ func (r uncompressedReadCloser) Close() error {
return res
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *Source) HasThreadSafeGetBlob() bool {
- return true
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -263,7 +273,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
}
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
- return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+ return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
@@ -292,7 +302,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
if err != nil {
- return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+ return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err)
}
newStream := uncompressedReadCloser{
@@ -305,27 +315,5 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
return newStream, li.size, nil
}
- return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
- }
- return [][]byte{}, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as the primary manifest can not be a list, so there can be no secondary manifests.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
+ return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
}
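
Editorial note: the recurring change in this file, and throughout this vendor bump, is the retirement of github.com/pkg/errors in favor of stdlib wrapping. A minimal, self-contained sketch of the before/after pattern; the errNotFound sentinel and decode helper are illustrative, not from the patch:

    package main

    import (
            "errors"
            "fmt"
    )

    var errNotFound = errors.New("not found") // illustrative sentinel

    // decode mimics the rewritten call sites: fmt.Errorf with %w replaces
    // errors.Wrapf, and the legacy "Error ..." prefix is dropped so callers
    // can compose messages without stuttering.
    func decode(path string) error {
            return fmt.Errorf("decoding tar config %q: %w", path, errNotFound)
    }

    func main() {
            err := decode("config.json")
            fmt.Println(err)                         // decoding tar config "config.json": not found
            fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the cause chain
    }
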
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
index e0683b3cd..883c06117 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
@@ -4,18 +4,21 @@ import (
"archive/tar"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
+ "slices"
"sync"
"time"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +32,7 @@ type Writer struct {
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
- legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
+ legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
@@ -42,7 +45,7 @@ func NewWriter(dest io.Writer) *Writer {
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
- legacyLayers: map[string]struct{}{},
+ legacyLayers: set.New[string](),
manifestByConfig: map[digest.Digest]int{},
}
}
@@ -67,17 +70,17 @@ func (w *Writer) unlock() {
// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
-func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
- return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+ return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
}
- return false, types.BlobInfo{}, nil
+ return false, private.ReusedBlob{}, nil
}
// recordBlob records metadata of a recorded blob, which must contain at least a digest and size.
@@ -89,24 +92,27 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) {
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
- if _, ok := w.legacyLayers[layerID]; !ok {
+ if !w.legacyLayers.Contains(layerID) {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
- physicalLayerPath := w.physicalLayerPath(layerDigest)
+ physicalLayerPath, err := w.physicalLayerPath(layerDigest)
+ if err != nil {
+ return err
+ }
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
- return errors.Wrap(err, "Error creating layer symbolic link")
+ return fmt.Errorf("creating layer symbolic link: %w", err)
}
b := []byte("1.0")
if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
- return errors.Wrap(err, "Error writing VERSION file")
+ return fmt.Errorf("writing VERSION file: %w", err)
}
if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
- return errors.Wrap(err, "Error writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
- w.legacyLayers[layerID] = struct{}{}
+ w.legacyLayers.Add(layerID)
}
return nil
}
@@ -117,7 +123,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
- layerConfig := make(map[string]interface{})
+ layerConfig := make(map[string]any)
// The root layer doesn't have any parent
if lastLayerID != "" {
@@ -128,7 +134,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
var config map[string]*json.RawMessage
err := json.Unmarshal(configBytes, &config)
if err != nil {
- return errors.Wrap(err, "Error unmarshaling config")
+ return fmt.Errorf("unmarshaling config: %w", err)
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
@@ -136,6 +142,9 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
}
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
+ if err := l.Digest.Validate(); err != nil { // This should never fail on this code path, still: make sure the chainID computation is unambiguous.
+ return err
+ }
if chainID == "" {
chainID = l.Digest
} else {
@@ -152,15 +161,15 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
layerConfig["layer_id"] = chainID
b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
if err != nil {
- return errors.Wrap(err, "Error marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
- layerID := digest.Canonical.FromBytes(b).Hex()
+ layerID := digest.Canonical.FromBytes(b).Encoded()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
if err != nil {
- return errors.Wrap(err, "Error marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
@@ -188,14 +197,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
- if len(a.Layers) != len(b.Layers) {
+ if !slices.Equal(a.Layers, b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
- for i := range a.Layers {
- if a.Layers[i] != b.Layers[i] {
- return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
- }
- }
// Ignore RepoTags, that will be built later.
// Ignore Parent and LayerSources, which we don’t set to anything meaningful.
return nil
@@ -206,12 +210,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
layerPaths := []string{}
for _, l := range layerDescriptors {
- layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+ p, err := w.physicalLayerPath(l.Digest)
+ if err != nil {
+ return err
+ }
+ layerPaths = append(layerPaths, p)
}
var item *ManifestItem
+ configPath, err := w.configPath(configDigest)
+ if err != nil {
+ return err
+ }
newItem := ManifestItem{
- Config: w.configPath(configDigest),
+ Config: configPath,
RepoTags: []string{},
Layers: layerPaths,
Parent: "", // We don’t have this information
@@ -229,9 +241,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
item = &w.manifest[i]
}
- knownRepoTags := map[string]struct{}{}
+ knownRepoTags := set.New[string]()
for _, repoTag := range item.RepoTags {
- knownRepoTags[repoTag] = struct{}{}
+ knownRepoTags.Add(repoTag)
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
@@ -252,9 +264,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
- if _, ok := knownRepoTags[refString]; !ok {
+ if !knownRepoTags.Contains(refString) {
item.RepoTags = append(item.RepoTags, refString)
- knownRepoTags[refString] = struct{}{}
+ knownRepoTags.Add(refString)
}
}
@@ -280,10 +292,10 @@ func (w *Writer) Close() error {
b, err = json.Marshal(w.repositories)
if err != nil {
- return errors.Wrap(err, "Error marshaling repositories")
+ return fmt.Errorf("marshaling repositories: %w", err)
}
if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
- return errors.Wrap(err, "Error writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
if err := w.tar.Close(); err != nil {
@@ -296,21 +308,27 @@ func (w *Writer) Close() error {
// configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
-func (w *Writer) configPath(configDigest digest.Digest) string {
- return configDigest.Hex() + ".json"
+func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
+ if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+ return "", err
+ }
+ return configDigest.Encoded() + ".json", nil
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
-func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
- // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
+ if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+ return "", err
+ }
+ // Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
- return layerDigest.Hex() + ".tar"
+ return layerDigest.Encoded() + ".tar", nil
}
type tarFI struct {
@@ -337,7 +355,7 @@ func (t *tarFI) ModTime() time.Time {
func (t *tarFI) IsDir() bool {
return false
}
-func (t *tarFI) Sys() interface{} {
+func (t *tarFI) Sys() any {
return nil
}
@@ -346,7 +364,7 @@ func (t *tarFI) Sys() interface{} {
func (w *Writer) sendSymlinkLocked(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
- return nil
+ return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return w.tar.WriteHeader(hdr)
@@ -363,7 +381,7 @@ func (w *Writer) sendBytesLocked(path string, b []byte) error {
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
- return nil
+ return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := w.tar.WriteHeader(hdr); err != nil {
@@ -375,7 +393,7 @@ func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reade
return err
}
if size != expectedSize {
- return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
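
Editorial note: configPath and physicalLayerPath now return an error after validating the digest, because digest.Digest.Encoded() panics on malformed input and an unvalidated value could otherwise shape the tar member path. A standalone sketch of the same validate-then-derive pattern; layerPath is an illustrative name:

    package main

    import (
            "fmt"

            "github.com/opencontainers/go-digest"
    )

    // layerPath derives a tarball member name from a digest, validating first
    // so a malformed digest surfaces as an error instead of a panic (or a
    // surprising path component).
    func layerPath(d digest.Digest) (string, error) {
            if err := d.Validate(); err != nil {
                    return "", err
            }
            return d.Encoded() + ".tar", nil
    }

    func main() {
            p, err := layerPath(digest.FromString("hello"))
            fmt.Println(p, err) // <sha256 hex>.tar <nil>

            _, err = layerPath(digest.Digest("no-algorithm"))
            fmt.Println(err) // non-nil: invalid digest format
    }
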
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
deleted file mode 100644
index 0d5d8d82a..000000000
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package docker
-
-import (
- "fmt"
- "io/ioutil"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "strings"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/rootless"
- "github.com/containers/image/v5/types"
- "github.com/containers/storage/pkg/homedir"
- "github.com/ghodss/yaml"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
-// You can override this at build time with
-// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
-var systemRegistriesDirPath = builtinRegistriesDirPath
-
-// builtinRegistriesDirPath is the path to registries.d.
-// DO NOT change this, instead see systemRegistriesDirPath above.
-const builtinRegistriesDirPath = "/etc/containers/registries.d"
-
-// userRegistriesDirPath is the path to the per user registries.d.
-var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
-
-// defaultUserDockerDir is the default sigstore directory for unprivileged user
-var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
-
-// defaultDockerDir is the default sigstore directory for root
-var defaultDockerDir = "/var/lib/containers/sigstore"
-
-// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
-// NOTE: Keep this in sync with docs/registries.d.md!
-type registryConfiguration struct {
- DefaultDocker *registryNamespace `json:"default-docker"`
- // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
- Docker map[string]registryNamespace `json:"docker"`
-}
-
-// registryNamespace defines lookaside locations for a single namespace.
-type registryNamespace struct {
- SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
- SigStoreStaging string `json:"sigstore-staging"` // For writing only.
-}
-
-// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
-// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
-type signatureStorageBase *url.URL
-
-// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
-// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
-// Warning: This function only exposes configuration in registries.d;
-// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
-func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
- dr, ok := ref.(dockerReference)
- if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
- }
- // FIXME? Loading and parsing the config could be cached across calls.
- dirPath := registriesDirPath(sys)
- logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
- config, err := loadAndMergeConfig(dirPath)
- if err != nil {
- return nil, err
- }
-
- topLevel := config.signatureTopLevel(dr, write)
- var url *url.URL
- if topLevel != "" {
- url, err = url.Parse(topLevel)
- if err != nil {
- return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
- }
- } else {
- // returns default directory if no sigstore specified in configuration file
- url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
- logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
- }
- // NOTE: Keep this in sync with docs/signature-protocols.md!
- // FIXME? Restrict to explicitly supported schemes?
- repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
- return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
- }
- url.Path = url.Path + "/" + repo
- return url, nil
-}
-
-// registriesDirPath returns a path to registries.d
-func registriesDirPath(sys *types.SystemContext) string {
- return registriesDirPathWithHomeDir(sys, homedir.Get())
-}
-
-// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
-// it exists only to allow testing it with an artificial home directory.
-func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
- if sys != nil && sys.RegistriesDirPath != "" {
- return sys.RegistriesDirPath
- }
- userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
- if _, err := os.Stat(userRegistriesDirPath); err == nil {
- return userRegistriesDirPath
- }
- if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
- }
-
- return systemRegistriesDirPath
-}
-
-// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid
-func builtinDefaultSignatureStorageDir(euid int) *url.URL {
- if euid != 0 {
- return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
- }
- return &url.URL{Scheme: "file", Path: defaultDockerDir}
-}
-
-// loadAndMergeConfig loads configuration files in dirPath
-func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
- mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
- dockerDefaultMergedFrom := ""
- nsMergedFrom := map[string]string{}
-
- dir, err := os.Open(dirPath)
- if err != nil {
- if os.IsNotExist(err) {
- return &mergedConfig, nil
- }
- return nil, err
- }
- configNames, err := dir.Readdirnames(0)
- if err != nil {
- return nil, err
- }
- for _, configName := range configNames {
- if !strings.HasSuffix(configName, ".yaml") {
- continue
- }
- configPath := filepath.Join(dirPath, configName)
- configBytes, err := ioutil.ReadFile(configPath)
- if err != nil {
- return nil, err
- }
-
- var config registryConfiguration
- err = yaml.Unmarshal(configBytes, &config)
- if err != nil {
- return nil, errors.Wrapf(err, "Error parsing %s", configPath)
- }
-
- if config.DefaultDocker != nil {
- if mergedConfig.DefaultDocker != nil {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
- dockerDefaultMergedFrom, configPath)
- }
- mergedConfig.DefaultDocker = config.DefaultDocker
- dockerDefaultMergedFrom = configPath
- }
-
- for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
- if _, ok := mergedConfig.Docker[nsName]; ok {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
- nsName, nsMergedFrom[nsName], configPath)
- }
- mergedConfig.Docker[nsName] = nsConfig
- nsMergedFrom[nsName] = configPath
- }
- }
-
- return &mergedConfig, nil
-}
-
-// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
-// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
-func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
- if config.Docker != nil {
- // Look for a full match.
- identity := ref.PolicyConfigurationIdentity()
- if ns, ok := config.Docker[identity]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, identity)
- if url := ns.signatureTopLevel(write); url != "" {
- return url
- }
- }
-
- // Look for a match of the possible parent namespaces.
- for _, name := range ref.PolicyConfigurationNamespaces() {
- if ns, ok := config.Docker[name]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, name)
- if url := ns.signatureTopLevel(write); url != "" {
- return url
- }
- }
- }
- }
- // Look for a default location
- if config.DefaultDocker != nil {
- logrus.Debugf(` Using "default-docker" configuration`)
- if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
- return url
- }
- }
- return ""
-}
-
-// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
-// or "" if nothing has been configured.
-func (ns registryNamespace) signatureTopLevel(write bool) string {
- if write && ns.SigStoreStaging != "" {
- logrus.Debugf(` Using %s`, ns.SigStoreStaging)
- return ns.SigStoreStaging
- }
- if ns.SigStore != "" {
- logrus.Debugf(` Using %s`, ns.SigStore)
- return ns.SigStore
- }
- return ""
-}
-
-// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
-// base is not nil from the caller
-// NOTE: Keep this in sync with docs/signature-protocols.md!
-func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
- url := *base
- url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
- return &url
-}
diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go
new file mode 100644
index 000000000..862e88039
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_common.go
@@ -0,0 +1,6 @@
+//go:build !freebsd
+// +build !freebsd
+
+package docker
+
+const etcDir = "/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
new file mode 100644
index 000000000..2bf27ac06
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
@@ -0,0 +1,6 @@
+//go:build freebsd
+// +build freebsd
+
+package docker
+
+const etcDir = "/usr/local/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
index 94e9e5f23..e1f1f1f2b 100644
--- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -1,10 +1,11 @@
package policyconfiguration
import (
+ "errors"
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/pkg/errors"
)
// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
@@ -16,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) {
digested, isDigested := ref.(reference.Canonical)
switch {
case isTagged && isDigested: // Note that this CAN actually happen.
- return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
- return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
case isTagged:
res = res + ":" + tagged.Tag()
case isDigested:
@@ -39,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
// then in its parent "docker.io/library"; in none of "busybox",
// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
//
- // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
+ // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
// iteration matches the host name (for any namespace).
res := []string{}
name := ref.Name()
diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
index 6a86ec64f..d3f47d210 100644
--- a/vendor/github.com/containers/image/v5/docker/reference/normalize.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
@@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
}
// familiarizeName returns a shortened version of the name familiar
-// to to the Docker UI. Familiar names have the default domain
+// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go
index 8c0c23b2f..6c5484c06 100644
--- a/vendor/github.com/containers/image/v5/docker/reference/reference.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go
@@ -3,13 +3,13 @@
//
// Grammar
//
-// reference := name [ ":" tag ] [ "@" digest ]
+// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
-// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
+// path-component := alphanumeric [separator alphanumeric]*
+// alphanumeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
@@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
-// DEPRECATED: Use Domain or Path
+// Deprecated: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
new file mode 100644
index 000000000..7b15871f7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
@@ -0,0 +1,6 @@
+package reference
+
+// Return true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+ return anchoredIdentifierRegexp.MatchString(s)
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
index 786034932..76ba5c2d5 100644
--- a/vendor/github.com/containers/image/v5/docker/reference/regexp.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
@@ -1,143 +1,156 @@
package reference
-import "regexp"
+import (
+ "regexp"
+ "strings"
-var (
- // alphaNumericRegexp defines the alpha numeric atom, typically a
+ storageRegexp "github.com/containers/storage/pkg/regexp"
+)
+
+const (
+ // alphaNumeric defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
- alphaNumericRegexp = match(`[a-z0-9]+`)
+ alphaNumeric = `[a-z0-9]+`
- // separatorRegexp defines the separators allowed to be embedded in name
+ // separator defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores and multiple
- // dashes.
- separatorRegexp = match(`(?:[._]|__|[-]*)`)
+ // dashes. Repeated dashes and underscores are intentionally treated
+ // differently. In order to support valid hostnames as name components,
+ // supporting repeated dash was added. Additionally double underscore is
+ // now allowed as a separator to loosen the restriction for previously
+ // supported names.
+ separator = `(?:[._]|__|[-]*)`
- // nameComponentRegexp restricts registry path component names to start
- // with at least one letter or number, with following parts able to be
- // separated by one period, one or two underscore and multiple dashes.
- nameComponentRegexp = expression(
- alphaNumericRegexp,
- optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
- // domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
- domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+ domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
+
+ // The string counterpart for TagRegexp.
+ tag = `[\w][\w.-]{0,127}`
+
+ // The string counterpart for DigestRegexp.
+ digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
+
+ // The string counterpart for IdentifierRegexp.
+ identifier = `([a-f0-9]{64})`
+ // The string counterpart for ShortIdentifierRegexp.
+ shortIdentifier = `([a-f0-9]{6,64})`
+)
+
+var (
+ // nameComponent restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscores and multiple dashes.
+ nameComponent = expression(
+ alphaNumeric,
+ optional(repeated(separator, alphaNumeric)))
+
+ domain = expression(
+ domainComponent,
+ optional(repeated(literal(`.`), domainComponent)),
+ optional(literal(`:`), `[0-9]+`))
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
- DomainRegexp = expression(
- domainComponentRegexp,
- optional(repeated(literal(`.`), domainComponentRegexp)),
- optional(literal(`:`), match(`[0-9]+`)))
+ DomainRegexp = re(domain)
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
- TagRegexp = match(`[\w][\w.-]{0,127}`)
+ TagRegexp = re(tag)
+ anchoredTag = anchored(tag)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
- anchoredTagRegexp = anchored(TagRegexp)
+ anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
// DigestRegexp matches valid digests.
- DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+ DigestRegexp = re(digestPat)
+ anchoredDigest = anchored(digestPat)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
- anchoredDigestRegexp = anchored(DigestRegexp)
+ anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
+ namePat = expression(
+ optional(domain, literal(`/`)),
+ nameComponent,
+ optional(repeated(literal(`/`), nameComponent)))
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
- NameRegexp = expression(
- optional(DomainRegexp, literal(`/`)),
- nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp)))
+ NameRegexp = re(namePat)
+ anchoredName = anchored(
+ optional(capture(domain), literal(`/`)),
+ capture(nameComponent,
+ optional(repeated(literal(`/`), nameComponent))))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
- anchoredNameRegexp = anchored(
- optional(capture(DomainRegexp), literal(`/`)),
- capture(nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp))))
+ anchoredNameRegexp = storageRegexp.Delayed(anchoredName)
+ referencePat = anchored(capture(namePat),
+ optional(literal(":"), capture(tag)),
+ optional(literal("@"), capture(digestPat)))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
- ReferenceRegexp = anchored(capture(NameRegexp),
- optional(literal(":"), capture(TagRegexp)),
- optional(literal("@"), capture(DigestRegexp)))
+ ReferenceRegexp = re(referencePat)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
- IdentifierRegexp = match(`([a-f0-9]{64})`)
+ IdentifierRegexp = re(identifier)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
- ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+ ShortIdentifierRegexp = re(shortIdentifier)
+ anchoredIdentifier = anchored(identifier)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
- anchoredIdentifierRegexp = anchored(IdentifierRegexp)
-
- // anchoredShortIdentifierRegexp is used to check if a value
- // is a possible identifier prefix, anchored at start and end
- // of string.
- anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+ anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier)
)
-// match compiles the string to a regular expression.
-var match = regexp.MustCompile
+// re compiles the string to a regular expression.
+var re = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
-func literal(s string) *regexp.Regexp {
- re := match(regexp.QuoteMeta(s))
-
- if _, complete := re.LiteralPrefix(); !complete {
- panic("must be a literal")
- }
-
- return re
+func literal(s string) string {
+ return regexp.QuoteMeta(s)
}
// expression defines a full expression, where each regular expression must
// follow the previous.
-func expression(res ...*regexp.Regexp) *regexp.Regexp {
- var s string
- for _, re := range res {
- s += re.String()
- }
-
- return match(s)
+func expression(res ...string) string {
+ return strings.Join(res, "")
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
-func optional(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `?`)
+func optional(res ...string) string {
+ return group(expression(res...)) + `?`
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
-func repeated(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `+`)
+func repeated(res ...string) string {
+ return group(expression(res...)) + `+`
}
// group wraps the regexp in a non-capturing group.
-func group(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(?:` + expression(res...).String() + `)`)
+func group(res ...string) string {
+ return `(?:` + expression(res...) + `)`
}
// capture wraps the expression in a capturing group.
-func capture(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(` + expression(res...).String() + `)`)
+func capture(res ...string) string {
+ return `(` + expression(res...) + `)`
}
// anchored anchors the regular expression by adding start and end delimiters.
-func anchored(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`^` + expression(res...).String() + `$`)
+func anchored(res ...string) string {
+ return `^` + expression(res...) + `$`
}
diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go
new file mode 100644
index 000000000..3619c3bae
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/registries_d.go
@@ -0,0 +1,297 @@
+package docker
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/rootless"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "gopkg.in/yaml.v3"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
+
+// userRegistriesDirPath is the path to the per user registries.d.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+
+// defaultUserDockerDir is the default lookaside directory for unprivileged user
+var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
+
+// defaultDockerDir is the default lookaside directory for root
+var defaultDockerDir = "/var/lib/containers/sigstore"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+ DefaultDocker *registryNamespace `yaml:"default-docker"`
+ // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+ Docker map[string]registryNamespace `yaml:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+ Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
+ LookasideStaging string `yaml:"lookaside-staging"` // For writing only.
+ SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
+ SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
+ UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"`
+}
+
+// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below.
+type lookasideStorageBase *url.URL
+
+// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
+// Warning: This function only exposes configuration in registries.d;
+// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
+func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
+ dr, ok := ref.(dockerReference)
+ if !ok {
+ return nil, errors.New("ref must be a dockerReference")
+ }
+ config, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ return config.lookasideStorageBaseURL(dr, write)
+}
+
+// loadRegistryConfiguration returns a registryConfiguration appropriate for sys.
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) {
+ dirPath := registriesDirPath(sys)
+ logrus.Debugf(`Using registries.d directory %s`, dirPath)
+ return loadAndMergeConfig(dirPath)
+}
+
+// registriesDirPath returns a path to registries.d
+func registriesDirPath(sys *types.SystemContext) string {
+ return registriesDirPathWithHomeDir(sys, homedir.Get())
+}
+
+// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
+// it exists only to allow testing it with an artificial home directory.
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+ if sys != nil && sys.RegistriesDirPath != "" {
+ return sys.RegistriesDirPath
+ }
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
+ if err := fileutils.Exists(userRegistriesDirPath); err == nil {
+ return userRegistriesDirPath
+ }
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+
+ return systemRegistriesDirPath
+}
+
+// loadAndMergeConfig loads configuration files in dirPath
+// FIXME: Probably rename to loadRegistryConfigurationForPath
+func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
+ mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
+ dockerDefaultMergedFrom := ""
+ nsMergedFrom := map[string]string{}
+
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &mergedConfig, nil
+ }
+ return nil, err
+ }
+ configNames, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ for _, configName := range configNames {
+ if !strings.HasSuffix(configName, ".yaml") {
+ continue
+ }
+ configPath := filepath.Join(dirPath, configName)
+ configBytes, err := os.ReadFile(configPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var config registryConfiguration
+ err = yaml.Unmarshal(configBytes, &config)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %w", configPath, err)
+ }
+
+ if config.DefaultDocker != nil {
+ if mergedConfig.DefaultDocker != nil {
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`,
+ dockerDefaultMergedFrom, configPath)
+ }
+ mergedConfig.DefaultDocker = config.DefaultDocker
+ dockerDefaultMergedFrom = configPath
+ }
+
+ for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+ if _, ok := mergedConfig.Docker[nsName]; ok {
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`,
+ nsName, nsMergedFrom[nsName], configPath)
+ }
+ mergedConfig.Docker[nsName] = nsConfig
+ nsMergedFrom[nsName] = configPath
+ }
+ }
+
+ return &mergedConfig, nil
+}
+
+// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
+func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
+ topLevel := config.signatureTopLevel(dr, write)
+ var baseURL *url.URL
+ if topLevel != "" {
+ u, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
+ }
+ baseURL = u
+ } else {
+ // returns default directory if no lookaside specified in configuration file
+ baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
+ logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted())
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
+ }
+ baseURL.Path = baseURL.Path + "/" + repo
+ return baseURL, nil
+}
+
+// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid
+func builtinDefaultLookasideStorageDir(euid int) *url.URL {
+ if euid != 0 {
+ return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+ }
+ return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
+// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
+ if ret := ns.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
+ if ret := ns.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
+ if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+ return ""
+}
+
+// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments
+// for ref.
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`)
+ if config.DefaultDocker.UseSigstoreAttachments != nil {
+ return *config.DefaultDocker.UseSigstoreAttachments
+ }
+ }
+ return false
+}
+
+// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+ if write {
+ if ns.LookasideStaging != "" {
+ logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging)
+ return ns.LookasideStaging
+ }
+ if ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ }
+ if ns.Lookaside != "" {
+ logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside)
+ return ns.Lookaside
+ }
+ if ns.SigStore != "" {
+ logrus.Debugf(` Using "sigstore" %s`, ns.SigStore)
+ return ns.SigStore
+ }
+ return ""
+}
+
+// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
+// base is not nil from the caller
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
+ if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+ return nil, err
+ }
+ sigURL := *base
+ sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
+ return &sigURL, nil
+}
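
Editorial note: the configuration keys above moved from JSON to YAML tags, with lookaside/lookaside-staging as the preferred spellings and sigstore/sigstore-staging kept as deprecated aliases. A self-contained sketch of what a registries.d entry now looks like and how yaml.v3 maps it; the mirror struct and hostname are illustrative only:

    package main

    import (
            "fmt"

            "gopkg.in/yaml.v3"
    )

    // namespace mirrors the yaml tags of the unexported registryNamespace
    // above; the type name is illustrative, not the library's.
    type namespace struct {
            Lookaside              string `yaml:"lookaside"`
            LookasideStaging       string `yaml:"lookaside-staging"`
            SigStore               string `yaml:"sigstore"` // deprecated alias
            UseSigstoreAttachments *bool  `yaml:"use-sigstore-attachments,omitempty"`
    }

    const registriesD = `
    docker:
      registry.example.com/ns:
        lookaside: https://signatures.example.com/sigstore
        use-sigstore-attachments: true
    `

    func main() {
            var cfg struct {
                    Docker map[string]namespace `yaml:"docker"`
            }
            if err := yaml.Unmarshal([]byte(registriesD), &cfg); err != nil {
                    panic(err)
            }
            ns := cfg.Docker["registry.example.com/ns"]
            fmt.Println(ns.Lookaside, *ns.UseSigstoreAttachments) // https://... true
    }
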
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
index d0bbbba8a..6bcb835b9 100644
--- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -3,6 +3,7 @@ package docker
// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
import (
+ "fmt"
"net/http"
"strings"
)
@@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge {
return challenges
}
+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+ if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+ return &authScope{
+ resourceType: parts[0],
+ remoteName: parts[1],
+ actions: parts[2],
+ }, nil
+ }
+ return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
// NOTE: This is not a fully compliant parser per RFC 7235:
// Most notably it does not support more than one challenge within a single header
// Some of the whitespace parsing also seems noncompliant.
@@ -136,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
- for i = i + 1; i < len(s); i++ {
+ for i++; i < len(s); i++ {
b := s[i]
switch {
case escape:
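
Editorial note: the parseAuthScope function added above accepts the three-field scope grammar carried in WWW-Authenticate challenges. A standalone sketch of that grammar with an illustrative helper name:

    package main

    import (
            "fmt"
            "strings"
    )

    // splitScope restates the grammar parseAuthScope expects:
    // "$resource:$remote:$actions", e.g. the scope a registry requests
    // for a pull/push on a repository.
    func splitScope(s string) (resource, remote, actions string, err error) {
            parts := strings.Split(s, ":")
            if len(parts) != 3 {
                    return "", "", "", fmt.Errorf("error parsing auth scope: %q", s)
            }
            return parts[0], parts[1], parts[2], nil
    }

    func main() {
            r, n, a, err := splitScope("repository:library/busybox:pull,push")
            fmt.Println(r, n, a, err) // repository library/busybox pull,push <nil>
    }
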
diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go
deleted file mode 100644
index 651c301aa..000000000
--- a/vendor/github.com/containers/image/v5/image/docker_list.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package image
-
-import (
- "context"
-
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
- "github.com/pkg/errors"
-)
-
-func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
- list, err := manifest.Schema2ListFromManifest(manblob)
- if err != nil {
- return nil, errors.Wrapf(err, "Error parsing schema2 manifest list")
- }
- targetManifestDigest, err := list.ChooseInstance(sys)
- if err != nil {
- return nil, errors.Wrapf(err, "Error choosing image instance")
- }
- manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "Error loading manifest for target platform")
- }
-
- matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
- if err != nil {
- return nil, errors.Wrap(err, "Error computing manifest digest")
- }
- if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
- }
-
- return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
-}
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
index 61ca83364..e5a3b8991 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -1,400 +1,14 @@
package image
import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
+ "github.com/containers/image/v5/internal/image"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
-var GzippedEmptyLayer = []byte{
- 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
- 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-}
+var GzippedEmptyLayer = image.GzippedEmptyLayer
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
-const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
-
-type manifestSchema2 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- m *manifest.Schema2
-}
-
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema2FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestSchema2{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
- return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- m: manifest.Schema2FromComponents(config, layers),
- }
-}
-
-func (m *manifestSchema2) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestSchema2) manifestMIMEType() string {
- return m.m.MediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- configBlob, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
- // than OCI v1. This unmarshal makes sure we drop docker v2s2
- // fields that aren't needed in OCI v1.
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(configBlob, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the CompressionOperation and CompressionAlgorithm specified in one or more
-// options.LayerInfos items is anything other than gzip.
-func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.Schema2Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
-
- return memoryImageFromManifest(&copy), nil
-}
-
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
- return imgspecv1.Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
- configOCI, err := m.OCIConfig(ctx)
- if err != nil {
- return nil, err
- }
- configOCIBytes, err := json.Marshal(configOCI)
- if err != nil {
- return nil, err
- }
-
- config := imgspecv1.Descriptor{
- MediaType: imgspecv1.MediaTypeImageConfig,
- Size: int64(len(configOCIBytes)),
- Digest: digest.FromBytes(configOCIBytes),
- }
-
- layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
- for idx := range layers {
- layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
- switch m.m.LayersDescriptors[idx].MediaType {
- case manifest.DockerV2Schema2ForeignLayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
- case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
- case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema2LayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
- default:
- return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
- }
- }
-
- return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
-}
-
-// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-//
-// Based on docker/distribution/manifest/schema1/config_builder.go
-func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- dest := options.InformationOnly.Destination
-
- var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
- if options.LayerInfos != nil {
- if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
- return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
- len(options.LayerInfos), len(m.m.LayersDescriptors))
- }
- convertedLayerUpdates = []types.BlobInfo{}
- }
-
- configBytes, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- imageConfig := &manifest.Schema2Image{}
- if err := json.Unmarshal(configBytes, imageConfig); err != nil {
- return nil, err
- }
-
- // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
- history := make([]manifest.Schema1History, len(imageConfig.History))
- nonemptyLayerIndex := 0
- var parentV1ID string // Set in the loop
- v1ID := ""
- haveGzippedEmptyLayer := false
- if len(imageConfig.History) == 0 {
- // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
- }
- for v2Index, historyEntry := range imageConfig.History {
- parentV1ID = v1ID
- v1Index := len(imageConfig.History) - 1 - v2Index
-
- var blobDigest digest.Digest
- if historyEntry.EmptyLayer {
- emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
-
- if !haveGzippedEmptyLayer {
- logrus.Debugf("Uploading empty layer during conversion to schema 1")
- // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
- // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
- if err != nil {
- return nil, errors.Wrap(err, "Error uploading empty layer")
- }
- if info.Digest != emptyLayerBlobInfo.Digest {
- return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
- }
- haveGzippedEmptyLayer = true
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
- }
- blobDigest = emptyLayerBlobInfo.Digest
- } else {
- if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
- }
- blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
- nonemptyLayerIndex++
- }
-
- // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
- v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
- if err != nil {
- return nil, err
- }
- v1ID = v
-
- fakeImage := manifest.Schema1V1Compatibility{
- ID: v1ID,
- Parent: parentV1ID,
- Comment: historyEntry.Comment,
- Created: historyEntry.Created,
- Author: historyEntry.Author,
- ThrowAway: historyEntry.EmptyLayer,
- }
- fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
- v1CompatibilityBytes, err := json.Marshal(&fakeImage)
- if err != nil {
- return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
- }
-
- fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
- history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
- // Note that parentV1ID of the top layer is preserved when exiting this loop
- }
-
- // Now patch in real configuration for the top layer (v1Index == 0)
- v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
- if err != nil {
- return nil, err
- }
- v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
- if err != nil {
- return nil, err
- }
- history[0].V1Compatibility = string(v1Config)
-
- if options.LayerInfos != nil {
- options.LayerInfos = convertedLayerUpdates
- }
- m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
- if err != nil {
- return nil, err // This should never happen, we should have created all the components correctly.
- }
- return m1, nil
-}
-
-func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
- if err := blobDigest.Validate(); err != nil {
- return "", err
- }
- parts := append([]string{blobDigest.Hex()}, others...)
- v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
- return hex.EncodeToString(v1IDHash[:]), nil
-}
-
-func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
- // Preserve everything we don't specifically know about.
- // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
- rawContents := map[string]*json.RawMessage{}
- if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
- return nil, err
- }
- delete(rawContents, "rootfs")
- delete(rawContents, "history")
-
- updates := map[string]interface{}{"id": v1ID}
- if parentV1ID != "" {
- updates["parent"] = parentV1ID
- }
- if throwaway {
- updates["throwaway"] = throwaway
- }
- for field, value := range updates {
- encoded, err := json.Marshal(value)
- if err != nil {
- return nil, err
- }
- rawContents[field] = (*json.RawMessage)(&encoded)
- }
- return json.Marshal(rawContents)
-}
-
-// SupportsEncryption returns if encryption is supported for the manifest type
-func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
- return false
-}
+const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
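
This rewrite is the pattern repeated across the c/image files in this vendor bump: the public image package keeps its exported names but forwards them to internal/image (for types, the same is done with an alias, e.g. `type UnparsedImage = image.UnparsedImage` later in this diff). A minimal sketch of the forwarding idiom, under an invented module layout rather than the real c/image paths:

```go
// file: internal/impl/impl.go (hypothetical layout)
package impl

// GzippedEmptyLayer is the canonical definition; truncated here for brevity.
var GzippedEmptyLayer = []byte{31, 139, 8 /* ... */}

const GzippedEmptyLayerDigest = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"

// file: image/image.go (hypothetical)
package image

import "example.com/mod/internal/impl"

// Forwarding declarations: existing importers keep compiling unchanged,
// while the implementation evolves behind the internal package boundary.
var GzippedEmptyLayer = impl.GzippedEmptyLayer

const GzippedEmptyLayerDigest = impl.GzippedEmptyLayerDigest
```
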
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
deleted file mode 100644
index 36d70b5c2..000000000
--- a/vendor/github.com/containers/image/v5/image/manifest.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package image
-
-import (
- "context"
- "fmt"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-// genericManifest is an interface for parsing, modifying image manifests and related data.
-// Note that the public methods are intended to be a subset of types.Image
-// so that embedding a genericManifest into structs works.
-// will support v1 one day...
-type genericManifest interface {
- serialize() ([]byte, error)
- manifestMIMEType() string
- // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
- // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
- ConfigInfo() types.BlobInfo
- // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
- // The result is cached; it is OK to call this however often you need.
- ConfigBlob(context.Context) ([]byte, error)
- // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
- // layers in the resulting configuration isn't guaranteed to be returned due to how
- // old image manifests work (docker v2s1 especially).
- OCIConfig(context.Context) (*imgspecv1.Image, error)
- // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
- // The Digest field is guaranteed to be provided; Size may be -1.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfos() []types.BlobInfo
- // EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
- // It returns false if the manifest does not embed a Docker reference.
- // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
- EmbeddedDockerReferenceConflicts(ref reference.Named) bool
- // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
- Inspect(context.Context) (*types.ImageInspectInfo, error)
- // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
- // This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
- // (most importantly it forces us to download the full layers even if they are already present at the destination).
- UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
- // UpdatedImage returns a types.Image modified according to options.
- // This does not change the state of the original Image object.
- UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
- // SupportsEncryption returns if encryption is supported for the manifest type
- //
- // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
- // the process of updating a manifest between different manifest types was to update then convert.
- // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
- SupportsEncryption(ctx context.Context) bool
-}
-
-// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
-// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
-func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
- switch manifest.NormalizedMIMEType(mt) {
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
- return manifestSchema1FromManifest(manblob)
- case imgspecv1.MediaTypeImageManifest:
- return manifestOCI1FromManifest(src, manblob)
- case manifest.DockerV2Schema2MediaType:
- return manifestSchema2FromManifest(src, manblob)
- case manifest.DockerV2ListMediaType:
- return manifestSchema2FromManifestList(ctx, sys, src, manblob)
- case imgspecv1.MediaTypeImageIndex:
- return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
- default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
- }
-}
-
-// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
-func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
- blobs := make([]types.BlobInfo, len(layers))
- for i, layer := range layers {
- blobs[i] = layer.BlobInfo
- }
- return blobs
-}
-
-// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation
-// converted to a specific manifest MIME type.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original genericManifest object.
-type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error)
-
-// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if
-// required and re-apply the options to the converted type.
-// It returns (nil, nil) if no conversion was requested.
-func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {
- if options.ManifestMIMEType == "" {
- return nil, nil
- }
-
- converter, ok := converters[options.ManifestMIMEType]
- if !ok {
- return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
- }
-
- optionsCopy := options
- convertedManifest, err := converter(ctx, &optionsCopy)
- if err != nil {
- return nil, err
- }
- convertedImage := memoryImageFromManifest(convertedManifest)
-
- optionsCopy.ManifestMIMEType = ""
- return convertedImage.UpdatedImage(ctx, optionsCopy)
-}
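
From a caller's perspective, all of the converter plumbing deleted above is reached through a single knob: setting ManifestMIMEType in types.ManifestUpdateOptions when calling UpdatedImage. A hedged usage sketch (the img value is assumed to come from elsewhere):

```go
package example

import (
	"context"

	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// convertToOCI asks an existing types.Image for an OCI-format variant of
// itself; the original image is left unchanged, per the UpdatedImage contract.
func convertToOCI(ctx context.Context, img types.Image) (types.Image, error) {
	return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
	})
}
```
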
diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go
deleted file mode 100644
index 4c96b37d8..000000000
--- a/vendor/github.com/containers/image/v5/image/memory.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package image
-
-import (
- "context"
-
- "github.com/containers/image/v5/types"
- "github.com/pkg/errors"
-)
-
-// memoryImage is a mostly-implementation of types.Image assembled from data
-// created in memory, used primarily as a return value of types.Image.UpdatedImage
-// as a way to carry various structured information in a type-safe and easy-to-use way.
-// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
-// collection of all related information, e.g. there is no way to get layer blobs
-// from a memoryImage.
-type memoryImage struct {
- genericManifest
- serializedManifest []byte // A private cache for Manifest()
-}
-
-func memoryImageFromManifest(m genericManifest) types.Image {
- return &memoryImage{
- genericManifest: m,
- serializedManifest: nil,
- }
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (i *memoryImage) Reference() types.ImageReference {
- // It would really be inappropriate to return the ImageReference of the image this was based on.
- return nil
-}
-
-// Size returns the size of the image as stored, if known, or -1 if not.
-func (i *memoryImage) Size() (int64, error) {
- return -1, nil
-}
-
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) {
- if i.serializedManifest == nil {
- m, err := i.genericManifest.serialize()
- if err != nil {
- return nil, "", err
- }
- i.serializedManifest = m
- }
- return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
-}
-
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
- // Modifying an image invalidates signatures; a caller asking the updated image for signatures
- // is probably confused.
- return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
-}
-
-// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
deleted file mode 100644
index 58e9c03ba..000000000
--- a/vendor/github.com/containers/image/v5/image/oci.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package image
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-type manifestOCI1 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of m.Config.
- m *manifest.OCI1
-}
-
-func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.OCI1FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestOCI1{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
- return &manifestOCI1{
- src: src,
- configBlob: configBlob,
- m: manifest.OCI1FromComponents(config, layers),
- }
-}
-
-func (m *manifestOCI1) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestOCI1) manifestMIMEType() string {
- return imgspecv1.MediaTypeImageManifest
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.Config.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- cb, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(cb, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the combination of CompressionOperation and CompressionAlgorithm specified
-// in one or more options.LayerInfos items indicates that a layer is compressed using
-// an algorithm that is not allowed in OCI.
-func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.OCI1Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
-
- return memoryImageFromManifest(&copy), nil
-}
-
-func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
- return manifest.Schema2Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestOCI1 object.
-//
-// We need this function just because a function returning an implementation of the genericManifest
-// interface is not automatically assignable to a function type returning the genericManifest interface
-func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- return m.convertToManifestSchema2(ctx, options)
-}
-
-// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestOCI1 object.
-func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
- // Create a copy of the descriptor.
- config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
-
- // The only difference between OCI and DockerSchema2 is the mediatypes. The
- // media type of the manifest is handled by manifestSchema2FromComponents.
- config.MediaType = manifest.DockerV2Schema2ConfigMediaType
-
- layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
- for idx := range layers {
- layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
- switch layers[idx].MediaType {
- case imgspecv1.MediaTypeImageLayerNonDistributable:
- layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
- case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
- layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
- case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
- return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
- case imgspecv1.MediaTypeImageLayer:
- layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
- case imgspecv1.MediaTypeImageLayerGzip:
- layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
- case imgspecv1.MediaTypeImageLayerZstd:
- return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
- default:
- return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
- }
- }
-
- // Rather than copying the ConfigBlob now, we just pass m.src to the
- // translated manifest: since the only difference is the mediatype of
- // descriptors, there is no change to any blob stored in m.src.
- return manifestSchema2FromComponents(config, m.src, nil, layers), nil
-}
-
-// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestOCI1 object.
-func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- // We can't directly convert to V1, but we can transitively convert via a V2 image
- m2, err := m.convertToManifestSchema2(ctx, options)
- if err != nil {
- return nil, err
- }
-
- return m2.convertToManifestSchema1(ctx, options)
-}
-
-// SupportsEncryption returns if encryption is supported for the manifest type
-func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
- return true
-}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go
deleted file mode 100644
index 022e03aca..000000000
--- a/vendor/github.com/containers/image/v5/image/oci_index.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package image
-
-import (
- "context"
-
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
- "github.com/pkg/errors"
-)
-
-func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
- index, err := manifest.OCI1IndexFromManifest(manblob)
- if err != nil {
- return nil, errors.Wrapf(err, "Error parsing OCI1 index")
- }
- targetManifestDigest, err := index.ChooseInstance(sys)
- if err != nil {
- return nil, errors.Wrapf(err, "Error choosing image instance")
- }
- manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "Error loading manifest for target platform")
- }
-
- matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
- if err != nil {
- return nil, errors.Wrap(err, "Error computing manifest digest")
- }
- if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
- }
-
- return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
-}
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
index 3a016e1d0..2b7f6b144 100644
--- a/vendor/github.com/containers/image/v5/image/sourced.go
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -6,17 +6,10 @@ package image
import (
"context"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
)
-// imageCloser implements types.ImageCloser, perhaps allowing simple users
-// to use a single object without having to keep a reference to a types.ImageSource
-// only to call types.ImageSource.Close().
-type imageCloser struct {
- types.Image
- src types.ImageSource
-}
-
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
@@ -31,33 +24,7 @@ type imageCloser struct {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
- img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
- if err != nil {
- return nil, err
- }
- return &imageCloser{
- Image: img,
- src: src,
- }, nil
-}
-
-func (ic *imageCloser) Close() error {
- return ic.src.Close()
-}
-
-// sourcedImage is a general set of utilities for working with container images,
-// whatever is their underlying location (i.e. dockerImageSource-independent).
-// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
-// may not be a `sourcedImage` directly. However, most users of `types.Image`
-// do not care, and those who care about `skopeo/docker.Image` know they do.
-type sourcedImage struct {
- *UnparsedImage
- manifestBlob []byte
- manifestMIMEType string
- // genericManifest contains data corresponding to manifestBlob.
- // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
- // if you want to preserve the original manifest; use manifestBlob directly.
- genericManifest
+ return image.FromSource(ctx, sys, src)
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
@@ -66,39 +33,5 @@ type sourcedImage struct {
//
// The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
- // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
- // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
- // this is the only UnparsedImage implementation around, anyway.
-
- // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
- manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
- if err != nil {
- return nil, err
- }
-
- parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
- if err != nil {
- return nil, err
- }
-
- return &sourcedImage{
- UnparsedImage: unparsed,
- manifestBlob: manifestBlob,
- manifestMIMEType: manifestMIMEType,
- genericManifest: parsedManifest,
- }, nil
-}
-
-// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
-func (i *sourcedImage) Size() (int64, error) {
- return -1, nil
-}
-
-// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
-func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- return i.manifestBlob, i.manifestMIMEType, nil
-}
-
-func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+ return image.FromUnparsedImage(ctx, sys, unparsed)
}
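
Behaviorally both entry points are unchanged; they now simply delegate to internal/image. For reference, a hedged sketch of the usual calling sequence (ref obtained from a transport elsewhere):

```go
package example

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

func manifestOf(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]byte, string, error) {
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return nil, "", err
	}
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		src.Close() // on failure the source is still ours to close
		return nil, "", err
	}
	defer img.Close() // closes the wrapped ImageSource, per imageCloser above
	return img.Manifest(ctx)
}
```
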
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
index 4e3028d85..f2ebb929a 100644
--- a/vendor/github.com/containers/image/v5/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -1,95 +1,41 @@
package image
import (
- "context"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage .
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-type UnparsedImage struct {
- src types.ImageSource
- instanceDigest *digest.Digest
- cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
- // A private cache for Manifest(), may be the empty string if guessing failed.
- // Valid iff cachedManifest is not nil.
- cachedManifestMIMEType string
- cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
-}
+type UnparsedImage = image.UnparsedImage
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
- return &UnparsedImage{
- src: src,
- instanceDigest: instanceDigest,
- }
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (i *UnparsedImage) Reference() types.ImageReference {
- // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
- return i.src.Reference()
+ return image.UnparsedInstance(src, instanceDigest)
}
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- if i.cachedManifest == nil {
- m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
- if err != nil {
- return nil, "", err
- }
-
- // ImageSource.GetManifest does not do digest verification, but we do;
- // this immediately protects also any user of types.Image.
- if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
- matches, err := manifest.MatchesDigest(m, digest)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error computing manifest digest")
- }
- if !matches {
- return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
- }
- }
-
- i.cachedManifest = m
- i.cachedManifestMIMEType = mt
- }
- return i.cachedManifest, i.cachedManifestMIMEType, nil
+// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef
+type unparsedWithRef struct {
+ private.UnparsedImage
+ ref types.ImageReference
}
-// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
-// The bool return value seems redundant with digest != ""; it is used explicitly
-// to refuse (unexpected) situations when the digest exists but is "".
-func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
- if i.instanceDigest != nil {
- return *i.instanceDigest, true
- }
- ref := i.Reference().DockerReference()
- if ref != nil {
- if canonical, ok := ref.(reference.Canonical); ok {
- return canonical.Digest(), true
- }
- }
- return "", false
+func (uwr *unparsedWithRef) Reference() types.ImageReference {
+ return uwr.ref
}
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
- if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
- if err != nil {
- return nil, err
- }
- i.cachedSignatures = sigs
+// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef.
+// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image
+// based on a remote-registry policy.
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage {
+ return &unparsedWithRef{
+ UnparsedImage: unparsedimage.FromPublic(wrappedInstance),
+ ref: replacementRef,
}
- return i.cachedSignatures, nil
}
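
The new UnparsedInstanceWithReference helper targets exactly the case its comment mentions: evaluating a remote-name signature policy against image data that was already pulled locally. A hedged sketch (localSrc and remoteRef assumed to exist):

```go
package example

import (
	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

// policySubject returns an UnparsedImage whose manifest and signatures come
// from localSrc but whose Reference() reports remoteRef, so policy lookup
// and signature identity checks run against the remote name.
func policySubject(localSrc types.ImageSource, remoteRef types.ImageReference) types.UnparsedImage {
	unparsed := image.UnparsedInstance(localSrc, nil) // nil: default / top-level instance
	return image.UnparsedInstanceWithReference(unparsed, remoteRef)
}
```
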
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
index 1dceaa669..f31ee3124 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -1,7 +1,6 @@
package blobinfocache
import (
- "github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -22,10 +21,23 @@ type v1OnlyBlobInfoCache struct {
types.BlobInfoCache
}
-func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+func (bic *v1OnlyBlobInfoCache) Open() {
}
-func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
+func (bic *v1OnlyBlobInfoCache) Close() {
+}
+
+func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
+}
+
+func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
return nil
}
@@ -41,23 +53,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
}
return candidates
}
-
-// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
-// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
-// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
-// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
-// upon success.
-func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) {
- switch compressorName {
- case Uncompressed:
- return types.Decompress, nil, nil
- case UnknownCompression:
- return types.PreserveOriginal, nil, nil
- default:
- algo, err := compression.AlgorithmByName(compressorName)
- if err == nil {
- return types.Compress, &algo, nil
- }
- return types.PreserveOriginal, nil, err
- }
-}
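
The deleted OperationAndAlgorithmForCompressor helper gets no direct replacement because, as the types.go change below shows, candidates now carry the compression operation and algorithm themselves. A hedged sketch of a consumer under the new shape:

```go
package example

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/types"
)

// blobInfoForCandidate copies the compression fields straight off a
// candidate instead of re-deriving them from a compressor name, which is
// what the deleted helper used to do for TryReusingBlob-style callers.
func blobInfoForCandidate(c blobinfocache.BICReplacementCandidate2, size int64) types.BlobInfo {
	return types.BlobInfo{
		Digest:               c.Digest,
		Size:                 size,
		Annotations:          c.CompressionAnnotations,
		CompressionOperation: c.CompressionOperation,
		CompressionAlgorithm: c.CompressionAlgorithm,
	}
}
```
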
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
index 3c2be57f3..acf82ee63 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -1,6 +1,7 @@
package blobinfocache
import (
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -18,28 +19,63 @@ const (
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
types.BlobInfoCache
- // RecordDigestCompressorName records a compressor for the blob with the specified digest,
- // or Uncompressed or UnknownCompression.
- // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
- // digest just because some remote author claims so (e.g. because a manifest says so);
+
+ // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+ // Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+ Open()
+ // Close destroys state created by Open().
+ Close()
+
+ // UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+ // Returns "" if the uncompressed digest is unknown.
+ UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
+ // RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordDigestCompressorData records data for the blob with the specified digest.
+ // WARNING: Only call this with LOCALLY VERIFIED data:
+ // - don’t record a compressor for a digest just because some remote author claims so
+ // (e.g. because a manifest says so);
+ // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+ // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+ // in a manifest)
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
- RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
- // CandidateLocations2 returns a prioritized, limited number of blobs and their locations
+ RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
+ // CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
- //
- // If !canSubstitute, the returned candidates will match the submitted digest exactly; if
- // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+ CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
+}
+
+// DigestCompressorData is information known about how a blob is compressed.
+// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
+type DigestCompressorData struct {
+ BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+ // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+ SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+ SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant.
+}
+
+// CandidateLocations2Options are used in CandidateLocations2.
+type CandidateLocations2Options struct {
+ // If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
+ // CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
- //
- // The CompressorName fields in returned data must never be UnknownCompression.
- CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+ CanSubstitute bool
+ PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
+ RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
}
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
- Digest digest.Digest
- CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
- Location types.BICLocationReference
+ Digest digest.Digest
+ CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+ CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+ CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm
+ UnknownLocation bool // is true when `Location` for this blob is not set
+ Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
new file mode 100644
index 000000000..617a451aa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+)
+
+func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ list, err := manifest.Schema2ListFromManifest(manblob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
+ }
+ targetManifestDigest, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, fmt.Errorf("choosing image instance: %w", err)
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
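
Worth noting across all of the re-created internal files: github.com/pkg/errors wrapping is replaced with fmt.Errorf and the %w verb, which keeps the cause inspectable through the standard library. A small self-contained sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("manifest not found")

func main() {
	// Before: errors.Wrapf(err, "Error parsing schema2 manifest list")
	// After:  fmt.Errorf("parsing schema2 manifest list: %w", err)
	err := fmt.Errorf("parsing schema2 manifest list: %w", errNotFound)

	fmt.Println(err)                         // parsing schema2 manifest list: manifest not found
	fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the error chain
}
```
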
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
similarity index 89%
rename from vendor/github.com/containers/image/v5/image/docker_schema1.go
rename to vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
index 5f24970c3..3ef8e144d 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
@@ -2,13 +2,13 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type manifestSchema1 struct {
@@ -165,22 +165,22 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t
if len(m.m.ExtractedV1Compatibility) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
}
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
}
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
}
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.FSLayers))
}
convertedLayerUpdates = []types.BlobInfo{}
@@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
return false
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
+ return true // There are no MIME types in the manifest, so we must assume a valid image.
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
new file mode 100644
index 000000000..01219e391
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
@@ -0,0 +1,413 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayer.
+var GzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
+const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned to due how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
+ configOCI, err := m.OCIConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ }
+
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ switch m.m.LayersDescriptors[idx].MediaType {
+ case manifest.DockerV2Schema2ForeignLayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema2LayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+ }
+ }
+
+ return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+//
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ dest := options.InformationOnly.Destination
+
+ var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
+ if options.LayerInfos != nil {
+ if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ len(options.LayerInfos), len(m.m.LayersDescriptors))
+ }
+ convertedLayerUpdates = []types.BlobInfo{}
+ }
+
+ configBytes, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &manifest.Schema2Image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
+ emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
+
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
+ if err != nil {
+ return nil, fmt.Errorf("uploading empty layer: %w", err)
+ }
+ if info.Digest != emptyLayerBlobInfo.Digest {
+ return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
+ }
+ blobDigest = emptyLayerBlobInfo.Digest
+ } else {
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
+ }
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := manifest.Schema1V1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ if options.LayerInfos != nil {
+ options.LayerInfos = convertedLayerUpdates
+ }
+ m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ if err != nil {
+ return nil, err // This should never happen, we should have created all the components correctly.
+ }
+ return m1, nil
+}
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Encoded()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]any{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+ return false
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
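
A self-contained sanity check of the two constants above (a sketch: it copies the byte slice, because this internal package cannot be imported from outside c/image):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// gzippedEmptyLayer is copied verbatim from GzippedEmptyLayer above.
var gzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
	// Prints sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4,
	// matching GzippedEmptyLayerDigest.
	fmt.Println(digest.FromBytes(gzippedEmptyLayer))
}
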
diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go
new file mode 100644
index 000000000..ed57e08dd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go
@@ -0,0 +1,121 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// genericManifest is an interface for parsing and modifying image manifests and related data.
+// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
+// but there are also public methods that are only visible to packages that can import c/image/internal/image.
+type genericManifest interface {
+ serialize() ([]byte, error)
+ manifestMIMEType() string
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() types.BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob(context.Context) ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+ // layers in the resulting configuration isn't guaranteed to be returned to due how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig(context.Context) (*imgspecv1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+ // EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect(context.Context) (*types.ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // This does not change the state of the original Image object.
+ UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+ // SupportsEncryption returns whether encryption is supported for the manifest type
+ //
+ // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+ // the process of updating a manifest between different manifest types was to update then convert.
+ // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+ SupportsEncryption(ctx context.Context) bool
+
+ // The following methods are not a part of types.Image:
+ // ===
+
+ // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+ // (and the code can handle that).
+ // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+ // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+ // to a different manifest format).
+ CanChangeLayerCompression(mimeType string) bool
+}
+
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+ switch manifest.NormalizedMIMEType(mt) {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ return manifestSchema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return manifestOCI1FromManifest(src, manblob)
+ case manifest.DockerV2Schema2MediaType:
+ return manifestSchema2FromManifest(src, manblob)
+ case manifest.DockerV2ListMediaType:
+ return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+ case imgspecv1.MediaTypeImageIndex:
+ return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
+ default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt)
+ }
+}
+
+// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
+func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
+ blobs := make([]types.BlobInfo, len(layers))
+ for i, layer := range layers {
+ blobs[i] = layer.BlobInfo
+ }
+ return blobs
+}
+
+// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation
+// converted to a specific manifest MIME type.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original genericManifest object.
+type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error)
+
+// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if
+// required and re-apply the options to the converted type.
+// It returns (nil, nil) if no conversion was requested.
+func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {
+ if options.ManifestMIMEType == "" {
+ return nil, nil
+ }
+
+ converter, ok := converters[options.ManifestMIMEType]
+ if !ok {
+ return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
+ }
+
+ optionsCopy := options
+ convertedManifest, err := converter(ctx, &optionsCopy)
+ if err != nil {
+ return nil, err
+ }
+ convertedImage := memoryImageFromManifest(convertedManifest)
+
+ optionsCopy.ManifestMIMEType = ""
+ return convertedImage.UpdatedImage(ctx, optionsCopy)
+}
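
For orientation, a hedged sketch of how this two-step flow is reached from the public API (img is an existing types.Image and ctx a context.Context, both assumed): setting ManifestMIMEType makes UpdatedImage pick a converter from a map like the ones above, and the remaining options are then re-applied to the converted image.

// Request a conversion to OCI; layer edits, if any, are re-applied
// to the converted manifest by convertManifestIfRequiredWithUpdate.
updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
	ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
})
if err != nil {
	return err
}
_ = updated // a memory-backed types.Image carrying the converted manifest
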
diff --git a/vendor/github.com/containers/image/v5/internal/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go
new file mode 100644
index 000000000..e22c7aafd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/memory.go
@@ -0,0 +1,64 @@
+package image
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/types"
+)
+
+// memoryImage is a mostly-complete implementation of types.Image assembled from data
+// created in memory, used primarily as a return value of types.Image.UpdatedImage
+// as a way to carry various structured information in a type-safe and easy-to-use way.
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
+// collection of all related information, e.g. there is no way to get layer blobs
+// from a memoryImage.
+type memoryImage struct {
+ genericManifest
+ serializedManifest []byte // A private cache for Manifest()
+}
+
+func memoryImageFromManifest(m genericManifest) types.Image {
+ return &memoryImage{
+ genericManifest: m,
+ serializedManifest: nil,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *memoryImage) Reference() types.ImageReference {
+ // It would really be inappropriate to return the ImageReference of the image this was based on.
+ return nil
+}
+
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.serializedManifest == nil {
+ m, err := i.genericManifest.serialize()
+ if err != nil {
+ return nil, "", err
+ }
+ i.serializedManifest = m
+ }
+ return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // Modifying an image invalidates signatures; a caller asking the updated image for signatures
+ // is probably confused.
+ return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
+}
+
+// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go
new file mode 100644
index 000000000..aaef95ff3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/oci.go
@@ -0,0 +1,336 @@
+package image
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "slices"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ ociencspec "github.com/containers/ocicrypt/spec"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type manifestOCI1 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of m.Config.
+ m *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestOCI1{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+ return &manifestOCI1{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.OCI1FromComponents(config, layers),
+ }
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+ return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.Config.Digest {
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned to due how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ cb, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.OCI1Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+ return manifest.Schema2Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface.
+func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ return m.convertToManifestSchema2(ctx, options)
+}
+
+// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
+// If not, it returns (nil, nil).
+// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
+// and edits *options to not try decryption again.
+func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
+ if options == nil || options.LayerInfos == nil {
+ return nil, nil
+ }
+
+ originalInfos := m.LayerInfos()
+ if len(originalInfos) != len(options.LayerInfos) {
+ return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
+ }
+
+ ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
+ laterEdits := slices.Clone(options.LayerInfos)
+ needsOCIOnlyEdits := false
+ for i, edit := range options.LayerInfos {
+ // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
+ ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
+ ociOnlyEdits[i].CompressionAlgorithm = nil
+
+ if edit.CryptoOperation == types.Decrypt {
+ needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas
+ ociOnlyEdits[i].CryptoOperation = types.Decrypt
+ laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
+ }
+
+ if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
+ originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas.
+ ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
+ ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
+ laterEdits[i].CompressionOperation = types.PreserveOriginal
+ laterEdits[i].CompressionAlgorithm = nil
+ }
+ }
+ if !needsOCIOnlyEdits {
+ return nil, nil
+ }
+
+ options.LayerInfos = laterEdits
+ return ociOnlyEdits, nil
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
+ // which remove OCI-specific features, because trying to convert those layers would fail.
+ // So, do the layer updates for decryption, and for conversions from Zstd.
+ ociManifest := m.m
+ ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
+ if err != nil {
+ return nil, err
+ }
+ if ociOnlyEdits != nil {
+ ociManifest = manifest.OCI1Clone(ociManifest)
+ if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
+ return nil, err
+ }
+ }
+
+ // Create a copy of the descriptor.
+ config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config)
+
+ // Above, we have already checked that this manifest refers to an image, not an OCI artifact,
+ // so the only difference between OCI and DockerSchema2 is the mediatypes. The
+ // media type of the manifest is handled by manifestSchema2FromComponents.
+ config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
+ for idx := range layers {
+ layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
+ switch layers[idx].MediaType {
+ case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+ case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+ case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ case imgspecv1.MediaTypeImageLayer:
+ layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ case imgspecv1.MediaTypeImageLayerGzip:
+ layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+ case imgspecv1.MediaTypeImageLayerZstd:
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
+ ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
+ return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+ }
+ }
+
+ // Rather than copying the ConfigBlob now, we just pass m.src to the
+ // translated manifest: since the only difference is the mediatype of the
+ // descriptors, there is no change to any blob stored in m.src.
+ return manifestSchema2FromComponents(config, m.src, nil, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ // We can't directly convert images to V1, but we can transitively convert via a V2 image
+ m2, err := m.convertToManifestSchema2(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return m2.convertToManifestSchema1(ctx, options)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+ return true
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
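
A hedged usage sketch of the zstd restriction above (img and ctx assumed to exist): requesting Docker schema2 while a layer still advertises zstd fails in convertToManifestSchema2, so a caller pairs the conversion with gzip compression edits. Note that UpdatedImage only rewrites the manifest; actually recompressing the blobs is the job of the copy pipeline.

edits := img.LayerInfos()
for i := range edits {
	edits[i].CompressionOperation = types.Compress
	edits[i].CompressionAlgorithm = &compression.Gzip // from c/image/v5/pkg/compression
}
updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
	LayerInfos:       edits,
	ManifestMIMEType: manifest.DockerV2Schema2MediaType,
})
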
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
new file mode 100644
index 000000000..0e945c851
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ index, err := manifest.OCI1IndexFromManifest(manblob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing OCI1 index: %w", err)
+ }
+ targetManifestDigest, err := index.ChooseInstance(sys)
+ if err != nil {
+ return nil, fmt.Errorf("choosing image instance: %w", err)
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
new file mode 100644
index 000000000..661891aa5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference points to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+ src, err := ref.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+ img, err := FromSource(ctx, sys, src)
+ if err != nil {
+ src.Close()
+ return nil, err
+ }
+ return img, nil
+}
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
+}
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport is (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+ *UnparsedImage
+ ManifestBlob []byte // The manifest of the relevant instance
+ ManifestMIMEType string // MIME type of ManifestBlob
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.FromUnparsedImage.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SourcedImage{
+ UnparsedImage: unparsed,
+ ManifestBlob: manifestBlob,
+ ManifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *SourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ return i.ManifestBlob, i.ManifestMIMEType, nil
+}
+
+func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
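
A minimal usage sketch of the public counterpart, c/image/image.FromSource (ref, sys, and ctx assumed to exist), honoring the ownership rule documented above:

src, err := ref.NewImageSource(ctx, sys)
if err != nil {
	return err
}
img, err := image.FromSource(ctx, sys, src)
if err != nil {
	src.Close() // FromSource failed, so it did not take ownership of src
	return err
}
defer img.Close() // this closes src as well
manifestBlob, mimeType, err := img.Manifest(ctx)
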
diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
new file mode 100644
index 000000000..0f026501c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
@@ -0,0 +1,119 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// This is publicly visible as c/image/image.UnparsedImage.
+type UnparsedImage struct {
+ src private.ImageSource
+ instanceDigest *digest.Digest
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.UnparsedInstance.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return &UnparsedImage{
+ src: imagesource.FromPublic(src),
+ instanceDigest: instanceDigest,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+ return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+ // but this is very likely to be the only implementation ever.
+ sigs, err := i.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
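
For orientation, a minimal sketch of how UnparsedInstance is used (callable only from within c/image, since the package is internal; obtaining src from a transport's NewImageSource is assumed, and the helper name is hypothetical):

package example

import (
	"context"

	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/types"
)

// loadVerifiedManifest wraps src in an UnparsedImage and returns the manifest.
func loadVerifiedManifest(ctx context.Context, src types.ImageSource) ([]byte, string, error) {
	// A nil instanceDigest selects the primary manifest, which may be a manifest list.
	unparsed := image.UnparsedInstance(src, nil)
	// Manifest() verifies the digest when one is known (from instanceDigest or a
	// canonical reference) and caches the result for subsequent calls.
	return unparsed.Manifest(ctx)
}
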
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
new file mode 100644
index 000000000..70b207d9b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -0,0 +1,114 @@
+package impl
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+ dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+//
+// type yourDestination struct {
+// impl.Compat
+// …
+// }
+//
+// dest := &yourDestination{…}
+// dest.Compat = impl.AddCompat(dest)
+func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+ return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ IsConfig: isConfig,
+ })
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ return types.BlobInfo{
+ Digest: res.Digest,
+ Size: res.Size,
+ }, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ CanSubstitute: canSubstitute,
+ })
+ if !reused || err != nil {
+ return reused, types.BlobInfo{}, err
+ }
+ res := types.BlobInfo{
+ Digest: blob.Digest,
+ Size: blob.Size,
+ CompressionOperation: blob.CompressionOperation,
+ CompressionAlgorithm: blob.CompressionAlgorithm,
+ }
+ // This is probably not necessary; we preserve MediaType to decrease the risk of breaking external callers.
+ // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
+ // provide the value in cases where it is likely to be correct.
+ if blob.Digest == info.Digest {
+ res.MediaType = info.MediaType
+ }
+ return true, res, nil
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ withFormat := []signature.Signature{}
+ for _, sig := range signatures {
+ withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+ }
+ return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return c.dest.CommitWithOptions(ctx, private.CommitOptions{
+ UnparsedToplevel: unparsedToplevel,
+ })
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
new file mode 100644
index 000000000..9b42cfbec
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
@@ -0,0 +1,15 @@
+package impl
+
+import (
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+)
+
+// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions
+// is acceptable based on opts.
+func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
+ return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+ PossibleManifestFormats: opts.PossibleManifestFormats,
+ RequiredCompression: opts.RequiredCompression,
+ }, opts.OriginalCompression)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
new file mode 100644
index 000000000..704812e9a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+ // An empty slice or nil means any MIME type can be tried for upload.
+ SupportedManifestMIMETypes []string
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression types.LayerCompression
+ // AcceptsForeignLayerURLs is false if foreign layers in the manifest should actually be
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs bool
+ // MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+ MustMatchRuntimeOS bool
+ // IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference bool
+ // HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+ HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, any MIME type can be tried for upload.
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+ return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+ return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false if foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+ return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+ return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+ return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
+ return o.vals.HasThreadSafePutBlob
+}
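
A minimal sketch of the embedding pattern this file enables; the destination type and constructor below are hypothetical:

package example

import (
	"github.com/containers/image/v5/internal/imagedestination/impl"
	"github.com/containers/image/v5/types"
)

// someDestination is a hypothetical transport destination; embedding
// PropertyMethodsInitialize supplies the six property getters above.
type someDestination struct {
	impl.PropertyMethodsInitialize
	// ... transport-specific fields ...
}

func newSomeDestination() *someDestination {
	return &someDestination{
		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
			SupportedManifestMIMETypes: nil, // nil: any MIME type may be tried
			DesiredLayerCompression:    types.PreserveOriginal,
			HasThreadSafePutBlob:       true,
		}),
	}
}
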
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
new file mode 100644
index 000000000..22bed4b0f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoPutBlobPartialInitialize implements parts of private.ImageDestination
+// for transports that don’t support PutBlobPartial().
+// See NoPutBlobPartial() below.
+type NoPutBlobPartialInitialize struct {
+ transportName string
+}
+
+// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref.
+func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialRaw(ref.Transport().Name())
+}
+
+// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used
+// in situations where no ImageReference is available.
+func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
+ return false
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
+ return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
+type ImplementsPutBlobPartial struct{}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
new file mode 100644
index 000000000..7015fd068
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
@@ -0,0 +1,50 @@
+package stubs
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignaturesInitialize implements parts of private.ImageDestination
+// for transports that don’t support storing signatures.
+// See NoSignatures() below.
+type NoSignaturesInitialize struct {
+ message string
+}
+
+// NoSignatures creates a NoSignaturesInitialize, failing with message.
+func NoSignatures(message string) NoSignaturesInitialize {
+ return NoSignaturesInitialize{
+ message: message,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+ return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if len(signatures) != 0 {
+ return errors.New(stub.message)
+ }
+ return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+// Note that it might be even more useful to return a value dynamically detected based on the destination's actual capabilities.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error {
+ return nil
+}
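
A minimal sketch of how a transport that cannot store signatures might wire in these stubs; the archiveDestination type and its message are hypothetical:

package example

import (
	"github.com/containers/image/v5/internal/imagedestination/stubs"
)

// archiveDestination cannot store signatures, so it embeds
// NoSignaturesInitialize, constructed with a user-facing failure message.
type archiveDestination struct {
	stubs.NoSignaturesInitialize
	// ...
}

func newArchiveDestination() *archiveDestination {
	return &archiveDestination{
		NoSignaturesInitialize: stubs.NoSignatures("storing signatures is not supported in this archive format"),
	}
}
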
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
new file mode 100644
index 000000000..ab233406a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
@@ -0,0 +1,27 @@
+// Package stubs contains trivial stubs for parts of private.ImageDestination.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagedestination/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+// type yourDestination struct {
+// stubs.ImplementsPutBlobPartial
+// …
+// }
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+// type yourDestination struct {
+// stubs.NoPutBlobPartialInitialize
+// …
+// }
+//
+// dest := &yourDestination{
+// …
+// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+// }
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
new file mode 100644
index 000000000..0fe902e31
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -0,0 +1,107 @@
+package imagedestination
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+ stubs.NoPutBlobPartialInitialize
+
+ types.ImageDestination
+}
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementers of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+ if dest2, ok := dest.(private.ImageDestination); ok {
+ return dest2
+ }
+ return &wrapped{
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()),
+
+ ImageDestination: dest,
+ }
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ return private.UploadedBlob{
+ Digest: res.Digest,
+ Size: res.Size,
+ }, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if options.RequiredCompression != nil {
+ return false, private.ReusedBlob{}, nil
+ }
+ reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+ if !reused || err != nil {
+ return reused, private.ReusedBlob{}, err
+ }
+ return true, private.ReusedBlob{
+ Digest: blob.Digest,
+ Size: blob.Size,
+ CompressionOperation: blob.CompressionOperation,
+ CompressionAlgorithm: blob.CompressionAlgorithm,
+ // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
+ // annotations, and we didn’t use the blob.Annotations field previously, so we’ll
+ // continue not using it.
+ }, nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ simpleSigs := [][]byte{}
+ for _, sig := range signatures {
+ simpleSig, ok := sig.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(sig)
+ }
+ simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature())
+ }
+ return w.PutSignatures(ctx, simpleSigs, instanceDigest)
+}
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ return w.Commit(ctx, options.UnparsedToplevel)
+}
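
A minimal sketch of the wrapping step; the helper name is hypothetical, and the capability probe uses SupportsPutBlobPartial() as provided by the embedded stub:

package example

import (
	"github.com/containers/image/v5/internal/imagedestination"
	"github.com/containers/image/v5/types"
)

// supportsChunkedReuse is a hypothetical helper: FromPublic upgrades any
// types.ImageDestination to the private API; destinations that were merely
// wrapped report false here, so callers fall back to PutBlobWithOptions,
// which delegates to the original PutBlob.
func supportsChunkedReuse(dest types.ImageDestination) bool {
	pd := imagedestination.FromPublic(dest)
	return pd.SupportsPutBlobPartial()
}
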
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
new file mode 100644
index 000000000..7d859c312
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
@@ -0,0 +1,55 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+// See AddCompat below.
+type Compat struct {
+ src private.ImageSourceInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+//
+// Use it like this:
+//
+// type yourSource struct {
+// impl.Compat
+// …
+// }
+//
+// src := &yourSource{…}
+// src.Compat = impl.AddCompat(src)
+func AddCompat(src private.ImageSourceInternalOnly) Compat {
+ return Compat{src}
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ // Silently ignore signatures with other formats; the caller can’t handle them.
+ // Admittedly callers that want to sync all of the image might want to fail instead; this
+ // way an upgrade of c/image neither breaks them nor adds new functionality.
+ // Alternatively, we could possibly define the old GetSignatures to use the multi-format
+ // signature.Blob representation now, in general, but that could silently break them as well.
+ sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 000000000..d5eae6351
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
new file mode 100644
index 000000000..73e8c78e9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+ return o.vals.HasThreadSafeGetBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
new file mode 100644
index 000000000..b3a8c7e88
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignatures() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 000000000..286ae524b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,54 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+ transportName string
+}
+
+// NoGetBlobAt creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+ return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+ return NoGetBlobAtInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+ return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
new file mode 100644
index 000000000..cb345395e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,28 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+// type yourSource struct {
+// stubs.ImplementsGetBlobAt
+// …
+// }
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+// type yourSource struct {
+// stubs.NoGetBlobAtInitialize
+// …
+// }
+//
+// src := &yourSource{
+// …
+// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+// }
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
new file mode 100644
index 000000000..f0d1d042b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -0,0 +1,56 @@
+package imagesource
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+ stubs.NoGetBlobAtInitialize
+
+ types.ImageSource
+}
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementers of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+ if src2, ok := src.(private.ImageSource); ok {
+ return src2
+ }
+ return &wrapped{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
+
+ ImageSource: src,
+ }
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ sigs, err := w.GetSignatures(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
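
A corresponding sketch for sources, assuming a hypothetical helper: plain types.ImageSource values get their legacy GetSignatures output re-wrapped as SimpleSigning values, as shown above:

package example

import (
	"context"

	"github.com/containers/image/v5/internal/imagesource"
	"github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/types"
)

// readSignatures returns format-aware signatures for the primary manifest
// instance, regardless of whether src already implements the private API.
func readSignatures(ctx context.Context, src types.ImageSource) ([]signature.Signature, error) {
	return imagesource.FromPublic(src).GetSignaturesWithFormat(ctx, nil)
}
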
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 3fed1995c..f17d00246 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -1,10 +1,8 @@
package iolimits
import (
+ "fmt"
"io"
- "io/ioutil"
-
- "github.com/pkg/errors"
)
// All constants below are intended to be used as limits for `ReadAtMost`. The
@@ -47,13 +45,13 @@ const (
func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
limitedReader := io.LimitReader(reader, int64(limit+1))
- res, err := ioutil.ReadAll(limitedReader)
+ res, err := io.ReadAll(limitedReader)
if err != nil {
return nil, err
}
if len(res) > limit {
- return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+ return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
}
return res, nil
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/common.go b/vendor/github.com/containers/image/v5/internal/manifest/common.go
new file mode 100644
index 000000000..1f2ccb528
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/common.go
@@ -0,0 +1,72 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
+// can expect to be present.
+type AllowedManifestFields int
+
+const (
+ AllowedFieldConfig AllowedManifestFields = 1 << iota
+ AllowedFieldFSLayers
+ AllowedFieldHistory
+ AllowedFieldLayers
+ AllowedFieldManifests
+ AllowedFieldFirstUnusedBit // Keep this at the end!
+)
+
+// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
+// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
+// other than the ones the caller specifically allows.
+// expectedMIMEType is used only for diagnostics.
+// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
+// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
+// data that just isn’t what was expected, as opposed to actually ambiguous data.
+func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
+ allowed AllowedManifestFields) error {
+ if allowed >= AllowedFieldFirstUnusedBit {
+ return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
+ }
+ // Use a private type to decode, not just a map[string]any, because we want
+ // to also reject case-insensitive matches (which would be used by Go when really decoding
+ // the manifest).
+ // (It is expected that as manifest formats are added or extended over time, more fields will be added
+ // here.)
+ detectedFields := struct {
+ Config any `json:"config"`
+ FSLayers any `json:"fsLayers"`
+ History any `json:"history"`
+ Layers any `json:"layers"`
+ Manifests any `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &detectedFields); err != nil {
+ // The caller was supposed to already validate version numbers, so this should not happen;
+ // let’s not bother with making this error “nice”.
+ return err
+ }
+ unexpected := []string{}
+ // Sadly this isn’t easy to automate in Go without reflection. So, copy&paste.
+ if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 {
+ unexpected = append(unexpected, "config")
+ }
+ if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 {
+ unexpected = append(unexpected, "fsLayers")
+ }
+ if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 {
+ unexpected = append(unexpected, "history")
+ }
+ if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 {
+ unexpected = append(unexpected, "layers")
+ }
+ if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 {
+ unexpected = append(unexpected, "manifests")
+ }
+ if len(unexpected) != 0 {
+ return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
+ unexpected, expectedMIMEType)
+ }
+ return nil
+}
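
As an illustration of the intended call pattern, a sketch assuming an OCI image manifest, which may carry only the config and layers essential fields; the helper name is hypothetical:

package example

import (
	"github.com/containers/image/v5/internal/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// validateOCIImageManifest runs the ambiguity heuristic after the caller has
// already checked version numbers and MIME types.
func validateOCIImageManifest(blob []byte) error {
	return manifest.ValidateUnambiguousManifestFormat(blob, imgspecv1.MediaTypeImageManifest,
		manifest.AllowedFieldConfig|manifest.AllowedFieldLayers)
}
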
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go
new file mode 100644
index 000000000..68d079697
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go
@@ -0,0 +1,15 @@
+package manifest
+
+import (
+ "github.com/opencontainers/go-digest"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+//
+// This is publicly visible as c/image/manifest.Schema2Descriptor.
+type Schema2Descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
new file mode 100644
index 000000000..07922cece
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
@@ -0,0 +1,311 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ platform "github.com/containers/image/v5/internal/pkg/platform"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Schema2PlatformSpec describes the platform which a particular manifest is
+// specialized for.
+// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
+type Schema2PlatformSpec struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"` // removed in OCI
+}
+
+// Schema2ManifestDescriptor references a platform-specific manifest.
+// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
+type Schema2ManifestDescriptor struct {
+ Schema2Descriptor
+ Platform Schema2PlatformSpec `json:"platform"`
+}
+
+// Schema2ListPublic is a list of platform-specific manifests.
+// This is publicly visible as c/image/manifest.Schema2List.
+// Internal users should usually use Schema2List instead.
+type Schema2ListPublic struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Manifests []Schema2ManifestDescriptor `json:"manifests"`
+}
+
+// MIMEType returns the MIME type of this particular manifest list.
+func (list *Schema2ListPublic) MIMEType() string {
+ return list.MediaType
+}
+
+// Instances returns a slice of digests of the manifests that this list knows of.
+func (list *Schema2ListPublic) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(list.Manifests))
+ for i, m := range list.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the list.
+func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range list.Manifests {
+ if manifest.Digest == instanceDigest {
+ ret := ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }
+ ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
+ platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
+ ret.ReadOnly.Platform = &platform
+ return ret, nil
+ }
+ }
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
+ }
+ return index.editInstances(editInstances)
+}
+
+func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []Schema2ManifestDescriptor{}
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ case ListOpAdd:
+ if editInstance.AddPlatform == nil {
+ // Should we create a struct with empty fields instead?
+ // Right now ListOpAdd is only called when an instance with the same platform value
+ // already exists in the manifest, so this should not be reached in practice.
+ return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported")
+ }
+ addedEntries = append(addedEntries, Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ Digest: editInstance.AddDigest,
+ Size: editInstance.AddSize,
+ MediaType: editInstance.AddMediaType,
+ },
+ schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform),
+ })
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
+ }
+ }
+ if len(addedEntries) != 0 {
+ // slices.Clone() here to ensure a private backing array;
+ // an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
+ index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+ }
+ return nil
+}
+
+func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
+func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ // ChooseInstanceByCompression is the same as ChooseInstance for a schema2 manifest list.
+ return list.ChooseInstance(ctx)
+}
+
+// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
+// of the image which is appropriate for the current environment.
+func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ wantedPlatforms := platform.WantedPlatforms(ctx)
+ for _, wantedPlatform := range wantedPlatforms {
+ for _, d := range list.Manifests {
+ imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
+ if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
+ return d.Digest, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
+
+// Serialize returns the list in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (list *Schema2ListPublic) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(list)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
+ }
+ return buf, nil
+}
+
+// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
+func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
+ list := Schema2ListPublic{
+ SchemaVersion: 2,
+ MediaType: DockerV2ListMediaType,
+ Manifests: make([]Schema2ManifestDescriptor, len(components)),
+ }
+ for i, component := range components {
+ m := Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: slices.Clone(component.URLs),
+ },
+ Schema2PlatformSpec{
+ Architecture: component.Platform.Architecture,
+ OS: component.Platform.OS,
+ OSVersion: component.Platform.OSVersion,
+ OSFeatures: slices.Clone(component.Platform.OSFeatures),
+ Variant: component.Platform.Variant,
+ Features: slices.Clone(component.Platform.Features),
+ },
+ }
+ list.Manifests[i] = m
+ }
+ return &list
+}
+
+// Schema2ListPublicClone creates a deep copy of the passed-in list.
+// This is publicly visible as c/image/manifest.Schema2ListClone.
+func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
+ return Schema2ListPublicFromComponents(list.Manifests)
+}
+
+// ToOCI1Index returns the list encoded as an OCI1 index.
+func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
+ components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
+ for _, manifest := range list.Manifests {
+ platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
+ components = append(components, imgspecv1.Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: slices.Clone(manifest.URLs),
+ Platform: &platform,
+ })
+ }
+ oci := OCI1IndexPublicFromComponents(components, nil)
+ return oci, nil
+}
+
+// ToSchema2List returns the list encoded as a Schema2 list.
+func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
+ return Schema2ListPublicClone(list), nil
+}
+
+// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
+func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
+ list := Schema2ListPublic{
+ Manifests: []Schema2ManifestDescriptor{},
+ }
+ if err := json.Unmarshal(manifest, &list); err != nil {
+ return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
+ }
+ if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
+ AllowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &list, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (list *Schema2ListPublic) Clone() ListPublic {
+ return Schema2ListPublicClone(list)
+}
+
+// ConvertToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return list.Clone(), nil
+ case imgspecv1.MediaTypeImageIndex:
+ return list.ToOCI1Index()
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
+ }
+}
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List struct {
+ Schema2ListPublic
+}
+
+func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
+ return &Schema2List{*public}
+}
+
+func (index *Schema2List) CloneInternal() List {
+ return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+}
+
+func (index *Schema2List) Clone() ListPublic {
+ return index.CloneInternal()
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
+ public, err := Schema2ListPublicFromManifest(manifest)
+ if err != nil {
+ return nil, err
+ }
+ return schema2ListFromPublic(public), nil
+}
+
+// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
+func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
+ return imgspecv1.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ // Features is not supported in OCI, and discarded.
+ }
+}
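
A short sketch tying this file together, assuming a hypothetical helper: parse a schema2 manifest list, then resolve the instance digest for the platform described by sys (or the current platform):

package example

import (
	"github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// pickInstance wraps Schema2ListFromManifest and ChooseInstance; blob is the
// raw manifest list JSON, and sys may carry platform overrides.
func pickInstance(blob []byte, sys *types.SystemContext) (digest.Digest, error) {
	list, err := manifest.Schema2ListFromManifest(blob)
	if err != nil {
		return "", err
	}
	return list.ChooseInstance(sys)
}
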
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
new file mode 100644
index 000000000..6c8e233d9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
@@ -0,0 +1,56 @@
+package manifest
+
+import (
+ "fmt"
+
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType.
+// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
+const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+ // Callers should not be blindly calling image-specific operations and only checking MIME types
+ // on failure; if they care about the artifact type, they should check before using it.
+ // If they blindly assume an image, they don’t really need this value; just a type check
+ // is sufficient for basic "we can only pull images" UI.
+ //
+ // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+ // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+ // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+ // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+ // based on that kind of data.
+ //
+ // So, let’s not expose this until a specific need is identified.
+ mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
+//
+// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
+func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
+ // Callers decide based on manifest.Config.MediaType that this is not an image;
+ // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
+ // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
+ //
+ // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
+ // ArtifactType is the “top-level” one, although that’s not defined by the spec.
+ mimeType := manifest.ArtifactType
+ if mimeType == "" {
+ mimeType = manifest.Config.MediaType
+ }
+ return NonImageArtifactError{mimeType: mimeType}
+}
+
+func (e NonImageArtifactError) Error() string {
+ // Special-case these invalid mixed images, which show up from time to time:
+ if e.mimeType == dockerV2Schema2ConfigMediaType {
+ return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
+ }
+ return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
+}
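
Since NonImageArtifactError is meant to be detected via errors.As, a caller-side sketch with a hypothetical helper:

package example

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/internal/manifest"
)

// describePullError special-cases non-image artifacts without string matching.
func describePullError(err error) string {
	var artifactErr manifest.NonImageArtifactError
	if errors.As(err, &artifactErr) {
		return fmt.Sprintf("this is an artifact, not a container image: %v", artifactErr)
	}
	return err.Error()
}
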
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go
new file mode 100644
index 000000000..1c614d124
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go
@@ -0,0 +1,133 @@
+package manifest
+
+import (
+ "fmt"
+
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ListPublic is a subset of List which is a part of the public API,
+// so no methods can be added, removed, or changed.
+//
+// Internal users should usually use List instead.
+type ListPublic interface {
+ // MIMEType returns the MIME type of this particular manifest list.
+ MIMEType() string
+
+ // Instances returns a list of the manifests that this list knows of, other than its own.
+ Instances() []digest.Digest
+
+ // Update information about the list's instances. The length of the passed-in slice must
+ // match the length of the list of instances which the list already contains, and every field
+ // must be specified.
+ UpdateInstances([]ListUpdate) error
+
+ // Instance returns the size and MIME type of a particular instance in the list.
+ Instance(digest.Digest) (ListUpdate, error)
+
+ // ChooseInstance selects which manifest is most appropriate for the platform described by the
+ // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+ ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+ // Serialize returns the list in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+ // from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+
+ // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+ ConvertToMIMEType(mimeType string) (ListPublic, error)
+
+ // Clone returns a deep copy of this list and its contents.
+ Clone() ListPublic
+}
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List interface {
+ ListPublic
+ // CloneInternal returns a deep copy of this list and its contents.
+ CloneInternal() List
+ // ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
+ // SystemContext (or for the current platform if the SystemContext doesn't specify any details), preferring gzip when
+ // preferGzip is OptionalBoolTrue, and the best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
+ ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
+ // EditInstances edits information about the list's instances. It takes a slice of ListEdit where each element
+ // either updates an existing instance or adds a new one; the operation is selected by the
+ // ListOperation field.
+ EditInstances([]ListEdit) error
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+// This is publicly visible as c/image/manifest.ListUpdate.
+type ListUpdate struct {
+ Digest digest.Digest
+ Size int64
+ MediaType string
+ // ReadOnly fields: may be set by Instance(), ignored by UpdateInstances()
+ ReadOnly struct {
+ Platform *imgspecv1.Platform
+ Annotations map[string]string
+ CompressionAlgorithmNames []string
+ ArtifactType string
+ }
+}
+
+type ListOp int
+
+const (
+ listOpInvalid ListOp = iota
+ ListOpAdd
+ ListOpUpdate
+)
+
+// ListEdit includes the fields which a List's EditInstances() method will modify.
+type ListEdit struct {
+ ListOperation ListOp
+
+ // If ListOperation == ListOpUpdate (basically the previous UpdateInstances), all of the Update* fields must be set.
+ UpdateOldDigest digest.Digest
+ UpdateDigest digest.Digest
+ UpdateSize int64
+ UpdateMediaType string
+ UpdateAffectAnnotations bool
+ UpdateAnnotations map[string]string
+ UpdateCompressionAlgorithms []compression.Algorithm
+
+ // If ListOperation == ListOpAdd, all of the Add* fields must be set.
+ AddDigest digest.Digest
+ AddSize int64
+ AddMediaType string
+ AddArtifactType string
+ AddPlatform *imgspecv1.Platform
+ AddAnnotations map[string]string
+ AddCompressionAlgorithms []compression.Algorithm
+}
+
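For illustration, a hedged sketch of the EditInstances call shape (usable only within c/image, since this package is internal; the replaceInstance helper is hypothetical):

```go
package example

import (
	manifest "github.com/containers/image/v5/internal/manifest"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// replaceInstance swaps one instance of list for a re-pushed manifest.
// Per the ListEdit contract, the Update* fields must be set for ListOpUpdate.
func replaceInstance(list manifest.List, oldDigest, newDigest digest.Digest, size int64) error {
	return list.EditInstances([]manifest.ListEdit{{
		ListOperation:   manifest.ListOpUpdate,
		UpdateOldDigest: oldDigest,
		UpdateDigest:    newDigest,
		UpdateSize:      size,
		UpdateMediaType: imgspecv1.MediaTypeImageManifest,
	}})
}
```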
+// ListPublicFromBlob parses a list of manifests.
+// This is publicly visible as c/image/manifest.ListFromBlob.
+func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
+ list, err := ListFromBlob(manifest, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ return list, nil
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+ normalized := NormalizedMIMEType(manifestMIMEType)
+ switch normalized {
+ case DockerV2ListMediaType:
+ return Schema2ListFromManifest(manifest)
+ case imgspecv1.MediaTypeImageIndex:
+ return OCI1IndexFromManifest(manifest)
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+ }
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
+}
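The public wrapper c/image/manifest.ListFromBlob is a thin alias over this entry point. A minimal end-to-end sketch (index.json is an assumed local file holding a raw manifest list or OCI index):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
)

func main() {
	blob, err := os.ReadFile("index.json")
	if err != nil {
		panic(err)
	}
	list, err := manifest.ListFromBlob(blob, manifest.GuessMIMEType(blob))
	if err != nil {
		panic(err)
	}
	// A nil SystemContext selects the instance matching the current platform.
	instanceDigest, err := list.ChooseInstance(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("would pull instance:", instanceDigest)
}
```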
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
new file mode 100644
index 000000000..3fb52104a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
@@ -0,0 +1,226 @@
+package manifest
+
+import (
+ "encoding/json"
+ "slices"
+
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+ DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+// This is publicly visible as c/image/manifest.GuessMIMEType.
+func GuessMIMEType(manifest []byte) string {
+ // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+ // Also docker/distribution/manifest.Versioned.
+ meta := struct {
+ MediaType string `json:"mediaType"`
+ SchemaVersion int `json:"schemaVersion"`
+ Signatures any `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(manifest, &meta); err != nil {
+ return ""
+ }
+
+ switch meta.MediaType {
+ case DockerV2Schema2MediaType, DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
+ return meta.MediaType
+ }
+ // This is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures, i.e. computing the correct manifest digest.
+ switch meta.SchemaVersion {
+ case 1:
+ if meta.Signatures != nil {
+ return DockerV2Schema1SignedMediaType
+ }
+ return DockerV2Schema1MediaType
+ case 2:
+ // Best effort to understand if this is an OCI image since mediaType
+ // wasn't in the manifest for OCI image-spec < 1.0.2.
+ // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+ ociMan := struct {
+ Config struct {
+ MediaType string `json:"mediaType"`
+ } `json:"config"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociMan); err != nil {
+ return ""
+ }
+ switch ociMan.Config.MediaType {
+ case imgspecv1.MediaTypeImageConfig:
+ return imgspecv1.MediaTypeImageManifest
+ case DockerV2Schema2ConfigMediaType:
+ // This case should not happen since a Docker image
+ // must declare a top-level media type and
+ // `meta.MediaType` has already been checked.
+ return DockerV2Schema2MediaType
+ }
+ // Maybe an image index or an OCI artifact.
+ ociIndex := struct {
+ Manifests []imgspecv1.Descriptor `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+ return ""
+ }
+ if len(ociIndex.Manifests) != 0 {
+ if ociMan.Config.MediaType == "" {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ // FIXME: this is mixing media types of manifests and configs.
+ return ociMan.Config.MediaType
+ }
+ // It's most likely an OCI artifact with a custom config media
+ // type which is not (and cannot be) covered by the media-type
+ // checks above.
+ return imgspecv1.MediaTypeImageManifest
+ }
+ return ""
+}
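For example, the fallback logic classifies a mediaType-less schemaVersion 2 manifest by its config media type; a small self-contained check via the public re-export:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// OCI image-spec < 1.0.2 did not require a top-level mediaType;
	// the config media type is what identifies this as an OCI manifest.
	blob := []byte(`{
		"schemaVersion": 2,
		"config": {"mediaType": "application/vnd.oci.image.config.v1+json"},
		"layers": []
	}`)
	fmt.Println(manifest.GuessMIMEType(blob)) // application/vnd.oci.image.manifest.v1+json
}
```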
+
+// Digest returns the digest of a Docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+// This is publicly visible as c/image/manifest.Digest.
+func Digest(manifest []byte) (digest.Digest, error) {
+ if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+ sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+ if err != nil {
+ return "", err
+ }
+ manifest, err = sig.Payload()
+ if err != nil {
+ // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+ // that libtrust itself has josebase64UrlEncode()d
+ return "", err
+ }
+ }
+
+ return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+// This is publicly visible as c/image/manifest.MatchesDigest.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+ // This should eventually support various digest types.
+ actualDigest, err := Digest(manifest)
+ if err != nil {
+ return false, err
+ }
+ return expectedDigest == actualDigest, nil
+}
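A short sketch of the digest round trip (for v2s1 signed manifests, Digest strips the JWS wrapper first, so the digest matches the unsigned payload):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2}`)
	expected := digest.FromBytes(blob) // equals manifest.Digest(blob) for unsigned manifests
	ok, err := manifest.MatchesDigest(blob, expected)
	fmt.Println(ok, err) // true <nil>
}
```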
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+// This is publicly visible as c/image/manifest.NormalizedMIMEType.
+func NormalizedMIMEType(input string) string {
+ switch input {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case "application/json":
+ return DockerV2Schema1SignedMediaType
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType:
+ return input
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+ // This makes no real sense, but it happens because requests for manifests are redirected to a content
+ // distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return DockerV2Schema1SignedMediaType
+ }
+}
+
+// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
+func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
+ // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm().
+ switch algo.Name() {
+ case compressiontypes.GzipAlgorithmName:
+ return true
+ default:
+ return false
+ }
+}
+
+// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo.
+func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool {
+ if CompressionAlgorithmIsUniversallySupported(algo) {
+ return true
+ }
+ // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields.
+ // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge.
+ switch algo.Name() {
+ case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
+ return mimeType == imgspecv1.MediaTypeImageManifest
+ default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
+ return false
+ }
+}
+
+// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions;
+// it is a struct to allow longer and better-documented field names.
+type ReuseConditions struct {
+ PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
+ RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
+}
+
+// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression
+// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
+func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
+ if c.RequiredCompression != nil {
+ if candidateCompression == nil ||
+ (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
+ return false
+ }
+ }
+
+ // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
+ // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
+ // so don’t impose any restrictions if candidateCompression == nil.
+ if c.PossibleManifestFormats != nil && candidateCompression != nil {
+ if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool {
+ return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
+ }) {
+ return false
+ }
+ }
+
+ return true
+}
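A hedged in-package sketch of how these checks compose (compression.Zstd is the algorithm value exported by c/image/pkg/compression; schema2 cannot represent zstd, OCI manifests can):

```go
package manifest

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// exampleReuseCheck illustrates CandidateCompressionMatchesReuseConditions;
// it is not part of the package, just a sketch of the call shape.
func exampleReuseCheck() {
	zstd := compression.Zstd
	conds := ReuseConditions{
		PossibleManifestFormats: []string{DockerV2Schema2MediaType},
	}
	fmt.Println(CandidateCompressionMatchesReuseConditions(conds, &zstd)) // false: schema2 cannot represent zstd

	conds.PossibleManifestFormats = []string{imgspecv1.MediaTypeImageManifest}
	fmt.Println(CandidateCompressionMatchesReuseConditions(conds, &zstd)) // true
}
```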
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
new file mode 100644
index 000000000..6a0f88d3a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
@@ -0,0 +1,470 @@
+package manifest
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "math"
+ "runtime"
+ "slices"
+
+ platform "github.com/containers/image/v5/internal/pkg/platform"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index.
+ // The value of the annotation must be the string "true".
+ // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression.
+ // That also suggests that this instance benefits from
+ // Zstd compression, so it can be preferred by compatible consumers over instances that
+ // use gzip, depending on their local policy.
+ OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
+ OCI1InstanceAnnotationCompressionZSTDValue = "true"
+)
+
+// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
+// provide methods for.
+// This is publicly visible as c/image/manifest.OCI1Index
+// Internal users should usually use OCI1Index instead.
+type OCI1IndexPublic struct {
+ imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1IndexPublic) MIMEType() string {
+ return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1IndexPublic) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(index.Manifests))
+ for i, m := range index.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
+func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range index.Manifests {
+ if manifest.Digest == instanceDigest {
+ ret := ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }
+ ret.ReadOnly.Platform = manifest.Platform
+ ret.ReadOnly.Annotations = manifest.Annotations
+ ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
+ ret.ReadOnly.ArtifactType = manifest.ArtifactType
+ return ret, nil
+ }
+ }
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
+ }
+ return index.editInstances(editInstances)
+}
+
+func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
+ result := make([]string, 0, 1)
+ if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
+ result = append(result, compression.ZstdAlgorithmName)
+ }
+ // No compression was detected; assume the instance uses the default compression, gzip.
+ if len(result) == 0 {
+ result = append(result, compression.GzipAlgorithmName)
+ }
+ return result
+}
+
+func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
+ // TODO: This should also delete the annotation if the map already records an algorithm and the
+ // compressionAlgorithms list contains a different one. To do that, we would need to modify the
+ // callers to always provide a reliable and full compressionAlgorithms list.
+ if *annotationsMap == nil && len(compressionAlgorithms) > 0 {
+ *annotationsMap = map[string]string{}
+ }
+ for _, algo := range compressionAlgorithms {
+ switch algo.BaseVariantName() {
+ case compression.ZstdAlgorithmName:
+ (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
+ default:
+ continue
+ }
+ }
+}
+
+func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []imgspecv1.Descriptor{}
+ updatedAnnotations := false
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ if editInstance.UpdateAnnotations != nil {
+ updatedAnnotations = true
+ if editInstance.UpdateAffectAnnotations {
+ index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations)
+ } else {
+ if index.Manifests[targetIndex].Annotations == nil {
+ index.Manifests[targetIndex].Annotations = map[string]string{}
+ }
+ maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
+ }
+ }
+ addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations)
+ case ListOpAdd:
+ annotations := map[string]string{}
+ if editInstance.AddAnnotations != nil {
+ annotations = maps.Clone(editInstance.AddAnnotations)
+ }
+ addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
+ addedEntries = append(addedEntries, imgspecv1.Descriptor{
+ MediaType: editInstance.AddMediaType,
+ ArtifactType: editInstance.AddArtifactType,
+ Size: editInstance.AddSize,
+ Digest: editInstance.AddDigest,
+ Platform: editInstance.AddPlatform,
+ Annotations: annotations,
+ })
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
+ }
+ }
+ if len(addedEntries) != 0 {
+ // slices.Clone() here to ensure the slice uses a private backing array;
+ // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
+ index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+ }
+ if len(addedEntries) != 0 || updatedAnnotations {
+ slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
+ // FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared,
+ // one that generalizes to more algorithms?
+ aZstd := instanceIsZstd(a)
+ bZstd := instanceIsZstd(b)
+ switch {
+ case aZstd == bZstd:
+ return 0
+ case !aZstd: // Implies bZstd
+ return -1
+ default: // aZstd && !bZstd
+ return 1
+ }
+ })
+ }
+ return nil
+}
+
+func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
+// instanceIsZstd returns true if the instance uses zstd compression, otherwise false.
+func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
+ if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
+ return true
+ }
+ return false
+}
+
+type instanceCandidate struct {
+ platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
+ isZstd bool // true if this instance uses zstd compression
+ manifestPosition int // A zero-based index of the instance in the manifest list
+ digest digest.Digest // Instance digest
+}
+
+func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
+ switch {
+ case ic.platformIndex != other.platformIndex:
+ return ic.platformIndex < other.platformIndex
+ case ic.isZstd != other.isZstd:
+ if !preferGzip {
+ return ic.isZstd
+ } else {
+ return !ic.isZstd
+ }
+ case ic.manifestPosition != other.manifestPosition:
+ return ic.manifestPosition < other.manifestPosition
+ }
+ panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
+}
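An in-package illustration of the ordering (hypothetical snippet, not part of the package; both candidates match the same platform, so the compression preference decides):

```go
package manifest

import "fmt"

// exampleCandidatePreference is a sketch of the tie-breaking rules above.
func exampleCandidatePreference() {
	gzipInstance := instanceCandidate{platformIndex: 0, isZstd: false, manifestPosition: 0}
	zstdInstance := instanceCandidate{platformIndex: 0, isZstd: true, manifestPosition: 1}
	fmt.Println(zstdInstance.isPreferredOver(&gzipInstance, false)) // true: zstd wins by default
	fmt.Println(zstdInstance.isPreferredOver(&gzipInstance, true))  // false: gzip wins when gzip is preferred
}
```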
+
+// chooseInstance is a private equivalent to ChooseInstanceByCompression,
+// shared by ChooseInstance and ChooseInstanceByCompression.
+func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ didPreferGzip := preferGzip == types.OptionalBoolTrue
+ wantedPlatforms := platform.WantedPlatforms(ctx)
+ var bestMatch *instanceCandidate // nil until a matching candidate is found
+ for manifestIndex, d := range index.Manifests {
+ candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
+ if d.Platform != nil {
+ imagePlatform := ociPlatformClone(*d.Platform)
+ platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
+ return platform.MatchesPlatform(imagePlatform, wantedPlatform)
+ })
+ if platformIndex == -1 {
+ continue
+ }
+ candidate.platformIndex = platformIndex
+ }
+ if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
+ bestMatch = &candidate
+ }
+ }
+ if bestMatch != nil {
+ return bestMatch.digest, nil
+ }
+ return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
+
+func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ return index.chooseInstance(ctx, preferGzip)
+}
+
+// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
+// of the image which is appropriate for the current environment.
+func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ return index.chooseInstance(ctx, types.OptionalBoolFalse)
+}
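A usage sketch via the public re-exports, overriding the platform instead of detecting the current one (index.json is an assumed local file):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	blob, err := os.ReadFile("index.json")
	if err != nil {
		panic(err)
	}
	index, err := manifest.OCI1IndexFromManifest(blob)
	if err != nil {
		panic(err)
	}
	ctx := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm64"}
	instanceDigest, err := index.ChooseInstance(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("chose:", instanceDigest)
}
```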
+
+// Serialize returns the index in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(index)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
+ }
+ return buf, nil
+}
+
+// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
+// supplied data.
+// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
+func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
+ index := OCI1IndexPublic{
+ imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: make([]imgspecv1.Descriptor, len(components)),
+ Annotations: maps.Clone(annotations),
+ },
+ }
+ for i, component := range components {
+ index.Manifests[i] = oci1DescriptorClone(component)
+ }
+ return &index
+}
+
+func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor {
+ var platform *imgspecv1.Platform
+ if d.Platform != nil {
+ platformCopy := ociPlatformClone(*d.Platform)
+ platform = &platformCopy
+ }
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Digest: d.Digest,
+ Size: d.Size,
+ URLs: slices.Clone(d.URLs),
+ Annotations: maps.Clone(d.Annotations),
+ Data: bytes.Clone(d.Data),
+ Platform: platform,
+ ArtifactType: d.ArtifactType,
+ }
+}
+
+// OCI1IndexPublicClone creates a deep copy of the passed-in index.
+// This is publicly visible as c/image/manifest.OCI1IndexClone.
+func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
+ var subject *imgspecv1.Descriptor
+ if index.Subject != nil {
+ s := oci1DescriptorClone(*index.Subject)
+ subject = &s
+ }
+ manifests := make([]imgspecv1.Descriptor, len(index.Manifests))
+ for i, m := range index.Manifests {
+ manifests[i] = oci1DescriptorClone(m)
+ }
+ return &OCI1IndexPublic{
+ Index: imgspecv1.Index{
+ Versioned: index.Versioned,
+ MediaType: index.MediaType,
+ ArtifactType: index.ArtifactType,
+ Manifests: manifests,
+ Subject: subject,
+ Annotations: maps.Clone(index.Annotations),
+ },
+ }
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
+ return OCI1IndexPublicClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
+ components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+ for _, manifest := range index.Manifests {
+ platform := manifest.Platform
+ if platform == nil {
+ platform = &imgspecv1.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ }
+ }
+ components = append(components, Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: slices.Clone(manifest.URLs),
+ },
+ schema2PlatformSpecFromOCIPlatform(*platform),
+ })
+ }
+ s2 := Schema2ListPublicFromComponents(components)
+ return s2, nil
+}
+
+// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
+func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
+ index := OCI1IndexPublic{
+ Index: imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: []imgspecv1.Descriptor{},
+ Annotations: make(map[string]string),
+ },
+ }
+ if err := json.Unmarshal(manifest, &index); err != nil {
+ return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
+ }
+ if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
+ AllowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1IndexPublic) Clone() ListPublic {
+ return OCI1IndexPublicClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
+func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return index.ToSchema2List()
+ case imgspecv1.MediaTypeImageIndex:
+ return index.Clone(), nil
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
+ }
+}
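A sketch of converting a parsed OCI index into a Docker schema2 manifest list, e.g. for a registry that only accepts the Docker media types (the toSchema2List helper is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

// toSchema2List re-serializes an OCI index as a Docker schema2 manifest list;
// per ToSchema2List above, instances without a platform get the current
// GOOS/GOARCH filled in.
func toSchema2List(index *manifest.OCI1Index) ([]byte, error) {
	list, err := index.ConvertToMIMEType(manifest.DockerV2ListMediaType)
	if err != nil {
		return nil, err
	}
	return list.Serialize()
}

func main() {
	index := manifest.OCI1IndexFromComponents(nil, nil) // empty index, just for illustration
	blob, err := toSchema2List(index)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}
```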
+
+type OCI1Index struct {
+ OCI1IndexPublic
+}
+
+func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
+ return &OCI1Index{*public}
+}
+
+func (index *OCI1Index) CloneInternal() List {
+ return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
+}
+
+func (index *OCI1Index) Clone() ListPublic {
+ return index.CloneInternal()
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest list instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest list.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+ public, err := OCI1IndexPublicFromManifest(manifest)
+ if err != nil {
+ return nil, err
+ }
+ return oci1IndexFromPublic(public), nil
+}
+
+// ociPlatformClone returns an independent copy of p.
+func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform {
+ // The only practical way in Go to give read-only access to an array is to copy it.
+ // The only practical way in Go to copy a deep structure is to either do it manually field by field,
+ // or to use reflection (incl. a round-trip through JSON, which uses reflection).
+ //
+ // The combination of the two is just sad, and leads to code like this, which will
+ // need to be updated with every new Platform field.
+ return imgspecv1.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ }
+}
+
+// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure.
+func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
+ return Schema2PlatformSpec{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ Features: nil,
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/internal/multierr/multierr.go b/vendor/github.com/containers/image/v5/internal/multierr/multierr.go
new file mode 100644
index 000000000..1341925c1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/multierr/multierr.go
@@ -0,0 +1,34 @@
+package multierr
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Format creates an error value from the input slice (which must not be empty).
+// If the input contains a single error value, it is returned as is.
+// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings.
+//
+// Typical usage:
+//
+// var errs []error
+// // …
+// errs = append(errs, …)
+// // …
+// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)}
+func Format(first, middle, last string, errs []error) error {
+ switch len(errs) {
+ case 0:
+ return fmt.Errorf("internal error: multierr.Format called with 0 errors")
+ case 1:
+ return errs[0]
+ default:
+ // We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid:
+ // []error is not a valid parameter to a function expecting []any
+ anyErrs := make([]any, 0, len(errs))
+ for _, e := range errs {
+ anyErrs = append(anyErrs, e)
+ }
+ return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...)
+ }
+}
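Because the package is internal, this is callable only from within c/image; a sketch of the call shape and the resulting unwrap behavior:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/internal/multierr"
)

var errTimeout = errors.New("timeout")

func main() {
	errs := []error{errTimeout, errors.New("permission denied")}
	err := multierr.Format("failures doing FOO:\n* ", "\n* ", "", errs)
	fmt.Println(err)
	// The repeated %w verbs produce a multi-error with Unwrap() []error,
	// so the individual errors remain matchable:
	fmt.Println(errors.Is(err, errTimeout)) // true
}
```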
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
deleted file mode 100644
index 88e123cdd..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// Key represents a single key linked to one or more kernel keyrings.
-type Key struct {
- Name string
-
- id, ring keyID
- size int
-}
-
-// ID returns the 32-bit kernel identifier for a specific key
-func (k *Key) ID() int32 {
- return int32(k.id)
-}
-
-// Get the key's value as a byte slice
-func (k *Key) Get() ([]byte, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
-
- if k.size == 0 {
- k.size = 512
- }
-
- size := k.size
-
- b = make([]byte, int(size))
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- k.size = sizeRead
- }
- }
- return b[:k.size], err
-}
-
-// Unlink a key from the keyring it was loaded from (or added to). If the key
-// is not linked to any other keyrings, it is destroyed.
-func (k *Key) Unlink() error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
- return err
-}
-
-// Describe returns a string describing the attributes of a specified key
-func (k *Key) Describe() (string, error) {
- keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
- if err != nil {
- return "", err
- }
- return keyAttr, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
deleted file mode 100644
index 91c64a1b8..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-package keyctl
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Keyring is the basic interface to a linux keyctl keyring.
-type Keyring interface {
- ID
- Add(string, []byte) (*Key, error)
- Search(string) (*Key, error)
-}
-
-type keyring struct {
- id keyID
-}
-
-// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
-type ID interface {
- ID() int32
-}
-
-// Add a new key to a keyring. The key can be searched for later by name.
-func (kr *keyring) Add(name string, key []byte) (*Key, error) {
- r, err := unix.AddKey("user", name, key, int(kr.id))
- if err == nil {
- key := &Key{Name: name, id: keyID(r), ring: kr.id}
- return key, nil
- }
- return nil, err
-}
-
-// Search for a key by name, this also searches child keyrings linked to this
-// one. The key, if found, is linked to the top keyring that Search() was called
-// from.
-func (kr *keyring) Search(name string) (*Key, error) {
- id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
- if err == nil {
- return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
- }
- return nil, err
-}
-
-// ID returns the 32-bit kernel identifier of a keyring
-func (kr *keyring) ID() int32 {
- return int32(kr.id)
-}
-
-// SessionKeyring returns the current login session keyring
-func SessionKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
-}
-
-// UserKeyring returns the keyring specific to the current user.
-func UserKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_USER_KEYRING)
-}
-
-// Unlink an object from a keyring
-func Unlink(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// Link a key into a keyring
-func Link(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it
-func ReadUserKeyring() ([]*Key, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
- krSize := 4
- size := krSize
- b = make([]byte, size)
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- krSize = sizeRead
- }
- }
- keyIDs := getKeyIDsFromByte(b[:krSize])
- return keyIDs, err
-}
-
-func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
- idSize := 4
- var keys []*Key
- for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
- tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
- keys = append(keys, &Key{id: keyID(tempID)})
- }
- return keys
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
deleted file mode 100644
index ae9697149..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// KeyPerm represents in-kernel access control permission to keys and keyrings
-// as a 32-bit integer broken up into four permission sets, one per byte.
-// In MSB order, the perms are: Processor, User, Group, Other.
-type KeyPerm uint32
-
-const (
- // PermOtherAll sets all permission for Other
- PermOtherAll KeyPerm = 0x3f << (8 * iota)
- // PermGroupAll sets all permission for Group
- PermGroupAll
- // PermUserAll sets all permission for User
- PermUserAll
- // PermProcessAll sets all permission for Processor
- PermProcessAll
-)
-
-// SetPerm sets the permissions on a key or keyring.
-func SetPerm(k ID, p KeyPerm) error {
- err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
- return err
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
deleted file mode 100644
index 196c82760..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-type keyID int32
-
-func newKeyring(id keyID) (*keyring, error) {
- r1, err := unix.KeyctlGetKeyringID(int(id), true)
- if err != nil {
- return nil, err
- }
-
- if id < 0 {
- r1 = int(id)
- }
- return &keyring{id: keyID(r1)}, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
index 3e81e06c0..3a16dad63 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -9,7 +9,7 @@ package platform
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -21,10 +21,12 @@ import (
"fmt"
"os"
"runtime"
+ "slices"
"strings"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
)
// For Linux, the kernel has already detected the ABI, ISA and Features.
@@ -62,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) {
return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
}
-func getCPUVariantWindows(arch string) string {
- // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+func getCPUVariantDarwinWindows(arch string) string {
+ // Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch arch {
@@ -81,15 +83,40 @@ func getCPUVariantWindows(arch string) string {
func getCPUVariantArm() string {
variant, err := getCPUInfo("Cpu architecture")
if err != nil {
+ logrus.Errorf("Couldn't get cpu architecture: %v", err)
return ""
}
- // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)
switch strings.ToLower(variant) {
case "8", "aarch64":
variant = "v8"
- case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
+ case "7":
+ // handle RPi Zero variant mismatch due to wrong variant from kernel
+ // https://github.com/containerd/containerd/pull/4530
+ // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+ // https://github.com/moby/moby/pull/36121#issuecomment-398328286
+ model, err := getCPUInfo("model name")
+ if err != nil {
+ logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err)
+ return ""
+ }
+ // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection,
+ // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35
+ // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 )
+ // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en ,
+ // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en .
+ // The Linux "cpu architecture" is determined by a “memory model†feature.
+ //
+ // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of
+ // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S .
+ if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+ logrus.Debugf("Detected corner case, setting cpu variant to v6")
+ variant = "v6"
+ } else {
+ variant = "v7"
+ }
case "6", "6tej":
variant = "v6"
case "5", "5t", "5te", "5tej":
@@ -106,8 +133,8 @@ func getCPUVariantArm() string {
}
func getCPUVariant(os string, arch string) string {
- if os == "windows" {
- return getCPUVariantWindows(arch)
+ if os == "darwin" || os == "windows" {
+ return getCPUVariantDarwinWindows(arch)
}
if arch == "arm" || arch == "arm64" {
return getCPUVariantArm()
@@ -126,7 +153,11 @@ var compatibility = map[string][]string{
// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
-func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform {
+ // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
+ // As of version 1.1, the OCI specification does not define those fields precisely enough
+ // to be interoperable, anyway.
+
wantedArch := runtime.GOARCH
wantedVariant := ""
if ctx != nil && ctx.ArchitectureChoice != "" {
@@ -152,13 +183,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
if wantedVariant != "" {
// If the user requested a specific variant, we'll walk down
// the list from most to least compatible.
- if compatibility[wantedArch] != nil {
- variantOrder := compatibility[wantedArch]
- for i, v := range variantOrder {
- if wantedVariant == v {
- variants = variantOrder[i:]
- break
- }
+ if variantOrder := compatibility[wantedArch]; variantOrder != nil {
+ if i := slices.Index(variantOrder, wantedVariant); i != -1 {
+ variants = variantOrder[i:]
}
}
if variants == nil {
@@ -184,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
Variant: v,
})
}
- return res, nil
+ return res
}
// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
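With the signature change above, WantedPlatforms no longer returns an error; a small sketch of requesting a specific arm variant and walking the compatibility fallbacks (importable only from within c/image):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm", VariantChoice: "v7"}
	for _, p := range platform.WantedPlatforms(ctx) {
		// Expect v7 first, then the more-compatible entries from the
		// compatibility table (e.g. v6, v5).
		fmt.Printf("%s/%s variant=%q\n", p.OS, p.Architecture, p.Variant)
	}
}
```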
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
new file mode 100644
index 000000000..4247a8db7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -0,0 +1,224 @@
+package private
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/signature"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// ImageSourceInternalOnly is the part of private.ImageSource that is not
+// a part of types.ImageSource.
+type ImageSourceInternalOnly interface {
+ // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+ SupportsGetBlobAt() bool
+ // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+ BlobChunkAccessor
+
+ // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error)
+}
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+ types.ImageSource
+ ImageSourceInternalOnly
+}
+
+// ImageDestinationInternalOnly is the part of private.ImageDestination that is not
+// a part of types.ImageDestination.
+type ImageDestinationInternalOnly interface {
+ // SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+ SupportsPutBlobPartial() bool
+ // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
+ // on unsupported formats.
+
+ // PutBlobWithOptions writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
+
+ // PutBlobPartial attempts to create a blob using the data that is already present
+ // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+ // It is available only if SupportsPutBlobPartial().
+ // Even if SupportsPutBlobPartial() returns true, the call can fail.
+ // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+ // The fallback _must not_ be done otherwise.
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)
+
+ // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If the blob has been successfully reused, returns (true, info, nil).
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
+
+ // PutSignaturesWithFormat writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+
+ // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before CommitWithOptions() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+ CommitWithOptions(ctx context.Context, options CommitOptions) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+ ImageDestinationInternalOnly
+}
+
+// UploadedBlob is information about a blob written to a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
+type UploadedBlob struct {
+ Digest digest.Digest
+ Size int64
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
+ IsConfig bool // True if the blob is a config
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// PutBlobPartialOptions are used in PutBlobPartial.
+type PutBlobPartialOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
+ LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs)
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
+ // If true, it is allowed to use an equivalent of the desired blob;
+ // in that case the returned info may not match the input.
+ CanSubstitute bool
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+ SrcRef reference.Named // A reference to the source image that contains the input blob.
+ PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob.
+ RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
+ OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”.
+ TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
+}
+
+// ReusedBlob is information about a blob reused in a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting.
+type ReusedBlob struct {
+ Digest digest.Digest // Must be provided
+ Size int64 // Must be provided
+ // The following compression fields should be set when the reuse substitutes
+ // a differently-compressed blob.
+ // They may also be set to change from a base variant to a specific variant of an algorithm.
+ CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
+ CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+
+ // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
+ // added even if the digest doesn’t change (if we found the annotations in a cache).
+ CompressionAnnotations map[string]string
+
+ MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
+}
+
+// CommitOptions are used in CommitWithOptions
+type CommitOptions struct {
+ // UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+ // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+ // original manifest list digest, if desired.
+ UnparsedToplevel types.UnparsedImage
+ // ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image
+ // into the value this option points to.
+ // What “resolved” means is transport-specific.
+ // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value.
+ ReportResolvedReference *types.ImageReference
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+ // Offset specifies the starting position of the chunk within the source blob.
+ Offset uint64
+
+ // Length specifies the size of the chunk. If it is set to math.MaxUint64,
+ // then it refers to all the data from Offset to the end of the blob.
+ Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+ // GetBlobAt returns a sequential channel of readers that contain data for the requested
+ // blob chunks, and a channel that might get a single error value.
+ // The specified chunks must not overlap, and must be sorted by their offset.
+ // The readers must be fully consumed, in the order they are returned, before blocking
+ // to read the next chunk.
+ // If the Length for the last chunk is set to math.MaxUint64, then it
+ // fully fetches the remaining data from the offset to the end of the blob.
+ GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
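+
+// A usage sketch (assuming the implementation closes both channels when done;
+// accessor, info and chunks are supplied by the caller, and consume is a
+// hypothetical helper):
+//
+//	readers, errs, err := accessor.GetBlobAt(ctx, info, chunks)
+//	if err != nil {
+//		return err
+//	}
+//	for r := range readers {
+//		consume(r) // must fully read r before receiving the next reader
+//		r.Close()
+//	}
+//	if err := <-errs; err != nil {
+//		return err
+//	}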
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+ Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+ return e.Status
+}
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
+type UnparsedImage interface {
+ types.UnparsedImage
+ // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+ UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial.
+// It suggests to the caller that a fallback mechanism can be used instead of a hard failure;
+// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob.
+type ErrFallbackToOrdinaryLayerDownload struct {
+ err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+ return c.err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+ return c.err
+}
+
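+// NewErrFallbackToOrdinaryLayerDownload wraps err in an ErrFallbackToOrdinaryLayerDownload,
+// signaling that the caller of PutBlobPartial may fall back to an ordinary layer download.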
+func NewErrFallbackToOrdinaryLayerDownload(err error) error {
+ return ErrFallbackToOrdinaryLayerDownload{err: err}
+}
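+
+// Callers can detect this condition with errors.As, as a sketch:
+//
+//	var fallback ErrFallbackToOrdinaryLayerDownload
+//	if errors.As(err, &fallback) {
+//		// fall back to an ordinary layer download, e.g. via PutBlobWithOptions
+//	}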
diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
new file mode 100644
index 000000000..b8d3a7e56
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
@@ -0,0 +1,57 @@
+package putblobdigest
+
+import (
+ "io"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Digester computes a digest of the provided stream, if not known yet.
+type Digester struct {
+ knownDigest digest.Digest // Or ""
+ digester digest.Digester // Or nil
+}
+
+// newDigester initiates computation of a digest.Canonical digest of stream,
+// if !validDigest; otherwise it just records knownDigest to be returned later.
+// The caller MUST use the returned stream instead of the original value.
+func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) {
+ if validDigest {
+ return Digester{knownDigest: knownDigest}, stream
+ } else {
+ res := Digester{
+ digester: digest.Canonical.Digester(),
+ }
+ stream = io.TeeReader(stream, res.digester.Hash())
+ return res, stream
+ }
+}
+
+// DigestIfUnknown initiates computation of a digest.Canonical digest of stream,
+// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will
+// be used (accepting any algorithm).
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "")
+}
+
+// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream,
+// if a digest.Canonical digest is not supplied in the provided blobInfo;
+// otherwise blobInfo.Digest will be used.
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical)
+}
+
+// Digest returns a digest value possibly computed by Digester.
+// This must be called only after all of the stream returned by a Digester constructor
+// has been successfully read.
+func (d Digester) Digest() digest.Digest {
+ if d.digester != nil {
+ return d.digester.Digest()
+ }
+ return d.knownDigest
+}
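+
+// A typical usage sketch (dest, stream and blobInfo are supplied by the caller):
+//
+//	digester, stream := putblobdigest.DigestIfUnknown(stream, blobInfo)
+//	if _, err := io.Copy(dest, stream); err != nil {
+//		return err // Digest() must not be called if reading the stream failed.
+//	}
+//	blobDigest := digester.Digest()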
diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go
new file mode 100644
index 000000000..acf30343e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/set/set.go
@@ -0,0 +1,52 @@
+package set
+
+import "golang.org/x/exp/maps"
+
+// FIXME: This should be in a public library somewhere.
+
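+// Set is an unordered collection of unique elements of a comparable type E.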
+type Set[E comparable] struct {
+ m map[E]struct{}
+}
+
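+// New returns an empty set.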
+func New[E comparable]() *Set[E] {
+ return &Set[E]{
+ m: map[E]struct{}{},
+ }
+}
+
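+// NewWithValues returns a set containing the specified values.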
+func NewWithValues[E comparable](values ...E) *Set[E] {
+ s := New[E]()
+ for _, v := range values {
+ s.Add(v)
+ }
+ return s
+}
+
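+// Add adds v to the set; adding an already-present element is a no-op.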
+func (s *Set[E]) Add(v E) {
+ s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
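+// AddSlice adds every element of slice to the set.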
+func (s *Set[E]) AddSlice(slice []E) {
+ for _, v := range slice {
+ s.Add(v)
+ }
+}
+
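+// Delete removes v from the set, if present.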
+func (s *Set[E]) Delete(v E) {
+ delete(s.m, v)
+}
+
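+// Contains returns true if v is an element of the set.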
+func (s *Set[E]) Contains(v E) bool {
+ _, ok := s.m[v]
+ return ok
+}
+
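+// Empty returns true if the set contains no elements.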
+func (s *Set[E]) Empty() bool {
+ return len(s.m) == 0
+}
+
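+// Values returns the elements of the set as a slice, in unspecified order.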
+func (s *Set[E]) Values() []E {
+ return maps.Keys(s.m)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go
new file mode 100644
index 000000000..6f95115a1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go
@@ -0,0 +1,102 @@
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// FIXME FIXME: MIME type? Int? String?
+// An interface with a name, parse methods?
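+// FormatID identifies a signature format; the currently defined values are
+// SimpleSigningFormat and SigstoreFormat.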
+type FormatID string
+
+const (
+ SimpleSigningFormat FormatID = "simple-signing"
+ SigstoreFormat FormatID = "sigstore-json"
+ // Update also UnsupportedFormatError below
+)
+
+// Signature is an image signature of some kind.
+type Signature interface {
+ FormatID() FormatID
+ // blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+ // Almost everyone should use signature.Blob() instead.
+ blobChunk() ([]byte, error)
+}
+
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
+func Blob(sig Signature) ([]byte, error) {
+ chunk, err := sig.blobChunk()
+ if err != nil {
+ return nil, err
+ }
+
+ format := sig.FormatID()
+ switch format {
+ case SimpleSigningFormat:
+ // For compatibility with old dir formats:
+ return chunk, nil
+ default:
+ res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text.
+ res = append(res, []byte(format)...)
+ res = append(res, '\n')
+ res = append(res, chunk...)
+ return res, nil
+ }
+}
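+
+// The resulting framing, as a sketch: simple signing signatures are stored as
+// the raw chunk (for compatibility with old dir formats); any other format is
+// stored as a 0x00 byte, the FormatID in ASCII, a newline, and then the chunk.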
+
+// FromBlob returns a signature from parsing a blob created by signature.Blob.
+func FromBlob(blob []byte) (Signature, error) {
+ if len(blob) == 0 {
+ return nil, errors.New("empty signature blob")
+ }
+ // Historically we’ve just been using GPG with no identification; try to auto-detect that.
+ switch blob[0] {
+ // OpenPGP "compressed data" wrapping the message
+ case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+ 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+ // OpenPGP “one-pass signature” starting a signature
+ 0x90, 0x91, 0x92, 0x93, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+ 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+ // OpenPGP signature packet signing the following data
+ 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+ 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+ return SimpleSigningFromBlob(blob), nil
+
+ // The newer format: binary 0, format name, newline, data
+ case 0x00:
+ blob = blob[1:]
+ formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
+ if !foundNewline {
+ return nil, fmt.Errorf("invalid signature format, missing newline")
+ }
+ for _, b := range formatBytes {
+ if b < 32 || b >= 0x7F {
+ return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+ }
+ }
+ switch {
+ case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+ return SimpleSigningFromBlob(blobChunk), nil
+ case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+ return sigstoreFromBlobChunk(blobChunk)
+ default:
+ return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+ }
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+ formatID := sig.FormatID()
+ switch formatID {
+ case SimpleSigningFormat, SigstoreFormat:
+ return fmt.Errorf("unsupported signature format %s", string(formatID))
+ default:
+ return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
new file mode 100644
index 000000000..8025cd270
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
@@ -0,0 +1,86 @@
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "maps"
+)
+
+const (
+ // from sigstore/cosign/pkg/types.SimpleSigningMediaType
+ SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+ // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+ SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+ // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
+ SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
+ // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
+ SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
+ // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
+ SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
+)
+
+// Sigstore is a github.com/sigstore/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
+type Sigstore struct {
+ untrustedMIMEType string
+ untrustedPayload []byte
+ untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+ UntrustedMIMEType string `json:"mimeType"`
+ UntrustedPayload []byte `json:"payload"`
+ UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+ return Sigstore{
+ untrustedMIMEType: untrustedMimeType,
+ untrustedPayload: bytes.Clone(untrustedPayload),
+ untrustedAnnotations: maps.Clone(untrustedAnnotations),
+ }
+}
+
+// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+ var v sigstoreJSONRepresentation
+ if err := json.Unmarshal(blobChunk, &v); err != nil {
+ return Sigstore{}, err
+ }
+ return SigstoreFromComponents(v.UntrustedMIMEType,
+ v.UntrustedPayload,
+ v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+ return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+ return json.Marshal(sigstoreJSONRepresentation{
+ UntrustedMIMEType: s.UntrustedMIMEType(),
+ UntrustedPayload: s.UntrustedPayload(),
+ UntrustedAnnotations: s.UntrustedAnnotations(),
+ })
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+ return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+ return bytes.Clone(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+ return maps.Clone(s.untrustedAnnotations)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go
new file mode 100644
index 000000000..76f270b48
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go
@@ -0,0 +1,29 @@
+package signature
+
+import "bytes"
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+ untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+ return SimpleSigning{
+ untrustedSignature: bytes.Clone(blobChunk),
+ }
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+ return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s SimpleSigning) blobChunk() ([]byte, error) {
+ return bytes.Clone(s.untrustedSignature), nil
+}
+
+func (s SimpleSigning) UntrustedSignature() []byte {
+ return bytes.Clone(s.untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signer/signer.go b/vendor/github.com/containers/image/v5/internal/signer/signer.go
new file mode 100644
index 000000000..5720254d1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signer/signer.go
@@ -0,0 +1,47 @@
+package signer
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+)
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// This type is visible to external callers, so it has no public fields or methods apart from Close().
+//
+// The owner of a Signer must call Close() when done.
+type Signer struct {
+ implementation SignerImplementation
+}
+
+// NewSigner creates a public Signer from a SignerImplementation
+func NewSigner(impl SignerImplementation) *Signer {
+ return &Signer{implementation: impl}
+}
+
+func (s *Signer) Close() error {
+ return s.implementation.Close()
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+// Alternatively, should SignImageManifest be provided a logging writer of some kind?
+func ProgressMessage(signer *Signer) string {
+ return signer.implementation.ProgressMessage()
+}
+
+// SignImageManifest invokes a SignerImplementation.
+// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage.
+func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) {
+ return signer.implementation.SignImageManifest(ctx, manifest, dockerReference)
+}
+
+// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// This interface is distinct from Signer so that implementations can be created outside of this package.
+type SignerImplementation interface {
+ // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+ ProgressMessage() string
+ // SignImageManifest creates a new signature for manifest m as dockerReference.
+ SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error)
+ Close() error
+}
diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
new file mode 100644
index 000000000..d5a5436a4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
@@ -0,0 +1,40 @@
+package streamdigest
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+)
+
+// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo.
+// The temporary file is returned as an io.Reader along with a cleanup function.
+// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
+// If an error occurs, inputInfo is not modified.
+func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
+ diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
+ if err != nil {
+ return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
+ }
+ cleanup := func() {
+ diskBlob.Close()
+ os.Remove(diskBlob.Name())
+ }
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
+ written, err := io.Copy(diskBlob, stream)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
+ }
+ _, err = diskBlob.Seek(0, io.SeekStart)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
+ }
+ inputInfo.Digest = digester.Digest()
+ inputInfo.Size = written
+ return diskBlob, cleanup, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
index 809446e18..bab73ee33 100644
--- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
// DO NOT change this, instead see unixTempDirForBigFiles above.
const builtinUnixTempDirForBigFiles = "/var/tmp"
+const prefix = "container_images_"
+
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
// which on systemd based systems could be the unsuitable tmpfs filesystem.
-func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
+func temporaryDirectoryForBigFiles(sys *types.SystemContext) string {
if sys != nil && sys.BigFilesTemporaryDir != "" {
return sys.BigFilesTemporaryDir
}
@@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
}
return temporaryDirectoryForBigFiles
}
+
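+// CreateBigFileTemp creates a temporary file for big files, with a name starting
+// with prefix+name, in the directory chosen by temporaryDirectoryForBigFiles.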
+func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) {
+ return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
+}
+
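+// MkDirBigFileTemp creates a temporary directory for big files, with a name starting
+// with prefix+name, in the directory chosen by temporaryDirectoryForBigFiles.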
+func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) {
+ return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
deleted file mode 100644
index bf89a69b8..000000000
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package types
-
-import (
- "context"
- "io"
-
- "github.com/containers/image/v5/docker/reference"
- publicTypes "github.com/containers/image/v5/types"
-)
-
-// ImageDestinationWithOptions is an internal extension to the ImageDestination
-// interface.
-type ImageDestinationWithOptions interface {
- publicTypes.ImageDestination
-
- // PutBlobWithOptions is a wrapper around PutBlob. If
- // options.LayerIndex is set, the blob will be committed directly.
- // Either by the calling goroutine or by another goroutine already
- // committing layers.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
-
- // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
- // options.LayerIndex is set, the reused blob will be recoreded as
- // already pulled.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Denotes whether the blob is a config or not.
- IsConfig bool
- // The corresponding index in the layer slice.
- LayerIndex *int
-}
-
-// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
-type TryReusingBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Use an equivalent of the desired blob.
- CanSubstitute bool
- // The corresponding index in the layer slice.
- LayerIndex *int
- // The reference of the image that contains the target blob.
- SrcRef reference.Named
-}
diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
new file mode 100644
index 000000000..fe65b1a98
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
@@ -0,0 +1,38 @@
+package unparsedimage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// wrapped provides the private.UnparsedImage operations
+// for an object that only implements types.UnparsedImage
+type wrapped struct {
+ types.UnparsedImage
+}
+
+// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API
+func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage {
+ if unparsed2, ok := unparsed.(private.UnparsedImage); ok {
+ return unparsed2
+ }
+ return &wrapped{
+ UnparsedImage: unparsed,
+ }
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ sigs, err := w.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
index 6aa9ead68..b95370af7 100644
--- a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
+++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
@@ -11,7 +11,7 @@ import (
// The net/http package uses a separate goroutine to upload data to a HTTP connection,
// and it is possible for the server to return a response (typically an error) before consuming
// the full body of the request. In that case http.Client.Do can return with an error while
-// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context().
+// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
//
// As a result, any data used/updated by the io.Reader() provided as the request body may be
// used/updated even after http.Client.Do returns, causing races.
diff --git a/vendor/github.com/containers/image/v5/internal/useragent/useragent.go b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go
new file mode 100644
index 000000000..7ac49693e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go
@@ -0,0 +1,6 @@
+package useragent
+
+import "github.com/containers/image/v5/version"
+
+// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise.
+var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 3ece948a0..8d9d5795f 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -3,35 +3,11 @@ package manifest
import (
"fmt"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
-// dupStringSlice returns a deep copy of a slice of strings, or nil if the
-// source slice is empty.
-func dupStringSlice(list []string) []string {
- if len(list) == 0 {
- return nil
- }
- dup := make([]string, len(list))
- copy(dup, list)
- return dup
-}
-
-// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
-// passed-in map is nil or has no keys.
-func dupStringStringMap(m map[string]string) map[string]string {
- if len(m) == 0 {
- return nil
- }
- result := make(map[string]string)
- for k, v := range m {
- result[k] = v
- }
- return result
-}
-
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {
@@ -44,13 +20,25 @@ func layerInfosToStrings(infos []LayerInfo) []string {
// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
// versions of “the same kind of content”.
-// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
type compressionMIMETypeSet map[string]string
const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found.
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType {
+ return variants
+ }
+ }
+ }
+ return nil
+}
+
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
@@ -59,38 +47,35 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
// differ from it only in what type of compression has been applied, the returned error will not be
// a ManifestLayerCompressionIncompatibilityError.
-func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
- for _, variants := range variantTable {
- for _, mt := range variants {
- if mt == mimeType { // Found the variant
- name := mtsUncompressed
- if algorithm != nil {
- name = algorithm.Name()
- }
- if res, ok := variants[name]; ok {
- if res != mtsUnsupportedMIMEType {
- return res, nil
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
- }
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
- }
- // We can't very well say “the idea of no compression is unknown”
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ if variants != nil {
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.BaseVariantName()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
}
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
+ }
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)}
}
+ // We can't very well say “the idea of no compression is unknown”
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if algorithm != nil {
- return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+ return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType)
}
- return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+ return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType)
}
// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
@@ -142,3 +127,26 @@ type ManifestLayerCompressionIncompatibilityError struct {
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
return m.text
}
+
+// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType
+// Note that the caller still needs to worry about a specific algorithm not being supported.
+func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return false
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
+}
+
+// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+ layers := make([]types.ImageInspectLayer, len(infos))
+ for i, info := range infos {
+ layers[i].MIMEType = info.MediaType
+ layers[i].Digest = info.Digest
+ layers[i].Size = info.Size
+ layers[i].Annotations = info.Annotations
+ }
+ return layers
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 58527d713..b74a1e240 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -2,15 +2,20 @@ package manifest
import (
"encoding/json"
- "regexp"
+ "errors"
+ "fmt"
+ "slices"
"strings"
"time"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@@ -52,13 +57,17 @@ type Schema1V1Compatibility struct {
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
// (NOTE: The instance is not necessarily a literal representation of the original blob,
// layers with duplicate IDs are eliminated.)
-func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
s1 := Schema1{}
- if err := json.Unmarshal(manifest, &s1); err != nil {
+ if err := json.Unmarshal(manifestBlob, &s1); err != nil {
return nil, err
}
if s1.SchemaVersion != 1 {
- return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
+ manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
+ return nil, err
}
if err := s1.initialize(); err != nil {
return nil, err
@@ -109,7 +118,7 @@ func (m *Schema1) initialize() error {
m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
for i, h := range m.History {
if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
- return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+ return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
}
}
return nil
@@ -134,18 +143,35 @@ func (m *Schema1) LayerInfos() []LayerInfo {
return layers
}
+const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets
+var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: fakeSchema1MIMEType,
+ compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+ },
+}
+
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
if len(m.FSLayers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
}
m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
for i, info := range layerInfos {
+ // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms,
+ // in a way that is consistent with the other schema implementations.
+ if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil {
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
+ }
// (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+ if info.CryptoOperation != types.PreserveOriginalCrypto {
+ return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest)
+ }
}
return nil
}
@@ -178,34 +204,34 @@ func (m *Schema1) fixManifestLayers() error {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check general duplicates to error instead of a deadlock
- idmap := make(map[string]struct{})
+ idmap := set.New[string]()
var lastID string
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
- if _, exists := idmap[img.ID]; img.ID != lastID && exists {
- return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ if img.ID != lastID && idmap.Contains(img.ID) {
+ return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
- idmap[lastID] = struct{}{}
+ idmap.Add(lastID)
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
- m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
- m.History = append(m.History[:i], m.History[i+1:]...)
- m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
+ m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
+ m.History = slices.Delete(m.History, i, i+1)
+ m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
- return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+ return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
}
return nil
}
-var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+var validHex = regexp.Delayed(`^([a-f0-9]{64})$`)
func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
- return errors.Errorf("image ID %q is invalid", id)
+ return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
@@ -216,13 +242,17 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: m.Tag,
Created: &s1.Created,
DockerVersion: s1.DockerVersion,
Architecture: s1.Architecture,
+ Variant: s1.Variant,
Os: s1.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s1.Author,
}
if s1.Config != nil {
i.Labels = s1.Config.Labels
@@ -242,14 +272,14 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
config := []byte(m.History[0].V1Compatibility)
err := json.Unmarshal(config, &s1)
if err != nil {
- return nil, errors.Wrapf(err, "error decoding configuration")
+ return nil, fmt.Errorf("decoding configuration: %w", err)
}
// Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
// adding some fields that aren't "omitempty".
if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
config, err = json.Marshal(&s1)
if err != nil {
- return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err)
}
}
// Build the history.
@@ -276,7 +306,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
raw := make(map[string]*json.RawMessage)
err = json.Unmarshal(config, &raw)
if err != nil {
- return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err)
}
// Drop some fields.
delete(raw, "id")
@@ -288,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
- return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
- return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
- return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+ return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err)
}
return config, nil
}
@@ -312,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
if err != nil {
return "", err
}
- return digest.FromBytes(image).Hex(), nil
+ return digest.FromBytes(image).Encoded(), nil
}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 6cb605263..7e53f4f54 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -5,20 +5,15 @@ import (
"fmt"
"time"
- "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/internal/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema2Descriptor is a “descriptor†in docker/distribution schema 2.
-type Schema2Descriptor struct {
- MediaType string `json:"mediaType"`
- Size int64 `json:"size"`
- Digest digest.Digest `json:"digest"`
- URLs []string `json:"urls,omitempty"`
-}
+type Schema2Descriptor = manifest.Schema2Descriptor
// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
@@ -59,9 +54,10 @@ type Schema2HealthConfig struct {
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
- StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
+ StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
@@ -160,9 +156,13 @@ type Schema2Image struct {
}
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
-func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
s2 := Schema2{}
- if err := json.Unmarshal(manifest, &s2); err != nil {
+ if err := json.Unmarshal(manifestBlob, &s2); err != nil {
+ return nil, err
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
+ manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
// Check manifest's and layers' media types.
@@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []LayerInfo {
- blobs := []LayerInfo{}
+ blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromSchema2Descriptor(layer),
@@ -214,14 +214,14 @@ func (m *Schema2) LayerInfos() []LayerInfo {
var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
- compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
{
- mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
- compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
}
@@ -230,7 +230,7 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
// CompressionAlgorithm that would result in anything other than gzip compression.
func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.LayersDescriptors) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
}
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
@@ -242,12 +242,15 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
m.LayersDescriptors[i].MediaType = mimeType
m.LayersDescriptors[i].Digest = info.Digest
m.LayersDescriptors[i].Size = info.Size
m.LayersDescriptors[i].URLs = info.URLs
+ if info.CryptoOperation != types.PreserveOriginalCrypto {
+ return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest)
+ }
}
return nil
}
@@ -268,6 +271,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
if err := json.Unmarshal(config, s2); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: &s2.Created,
@@ -275,7 +279,9 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
Architecture: s2.Architecture,
Variant: s2.Variant,
Os: s2.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s2.Author,
}
if s2.Config != nil {
i.Labels = s2.Config.Labels
@@ -289,5 +295,13 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
return "", err
}
- return m.ConfigDescriptor.Digest.Hex(), nil
+ return m.ConfigDescriptor.Digest.Encoded(), nil
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
+ return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
index bfedff69c..c958a3fa3 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -1,217 +1,32 @@
package manifest
import (
- "encoding/json"
- "fmt"
-
- platform "github.com/containers/image/v5/internal/pkg/platform"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
+ "github.com/containers/image/v5/internal/manifest"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
-type Schema2PlatformSpec struct {
- Architecture string `json:"architecture"`
- OS string `json:"os"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
- Variant string `json:"variant,omitempty"`
- Features []string `json:"features,omitempty"` // removed in OCI
-}
+type Schema2PlatformSpec = manifest.Schema2PlatformSpec
// Schema2ManifestDescriptor references a platform-specific manifest.
-type Schema2ManifestDescriptor struct {
- Schema2Descriptor
- Platform Schema2PlatformSpec `json:"platform"`
-}
+type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
// Schema2List is a list of platform-specific manifests.
-type Schema2List struct {
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- Manifests []Schema2ManifestDescriptor `json:"manifests"`
-}
-
-// MIMEType returns the MIME type of this particular manifest list.
-func (list *Schema2List) MIMEType() string {
- return list.MediaType
-}
-
-// Instances returns a slice of digests of the manifests that this list knows of.
-func (list *Schema2List) Instances() []digest.Digest {
- results := make([]digest.Digest, len(list.Manifests))
- for i, m := range list.Manifests {
- results[i] = m.Digest
- }
- return results
-}
-
-// Instance returns the ListUpdate of a particular instance in the list.
-func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
- for _, manifest := range list.Manifests {
- if manifest.Digest == instanceDigest {
- return ListUpdate{
- Digest: manifest.Digest,
- Size: manifest.Size,
- MediaType: manifest.MediaType,
- }, nil
- }
- }
- return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
-}
-
-// UpdateInstances updates the sizes, digests, and media types of the manifests
-// which the list catalogs.
-func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
- if len(updates) != len(list.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
- }
- for i := range updates {
- if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
- }
- list.Manifests[i].Digest = updates[i].Digest
- if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
- }
- list.Manifests[i].Size = updates[i].Size
- if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
- }
- list.Manifests[i].MediaType = updates[i].MediaType
- }
- return nil
-}
-
-// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
-// of the image which is appropriate for the current environment.
-func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
- wantedPlatforms, err := platform.WantedPlatforms(ctx)
- if err != nil {
- return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
- }
- for _, wantedPlatform := range wantedPlatforms {
- for _, d := range list.Manifests {
- imagePlatform := imgspecv1.Platform{
- Architecture: d.Platform.Architecture,
- OS: d.Platform.OS,
- OSVersion: d.Platform.OSVersion,
- OSFeatures: dupStringSlice(d.Platform.OSFeatures),
- Variant: d.Platform.Variant,
- }
- if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
- return d.Digest, nil
- }
- }
- }
- return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
-}
-
-// Serialize returns the list in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (list *Schema2List) Serialize() ([]byte, error) {
- buf, err := json.Marshal(list)
- if err != nil {
- return nil, errors.Wrapf(err, "error marshaling Schema2List %#v", list)
- }
- return buf, nil
-}
+type Schema2List = manifest.Schema2ListPublic
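+// (The implementation now lives in internal/manifest; the aliases above keep the
+// public manifest API source-compatible for existing callers.)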
// Schema2ListFromComponents creates a Schema2 manifest list instance from the
// supplied data.
func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
- list := Schema2List{
- SchemaVersion: 2,
- MediaType: DockerV2ListMediaType,
- Manifests: make([]Schema2ManifestDescriptor, len(components)),
- }
- for i, component := range components {
- m := Schema2ManifestDescriptor{
- Schema2Descriptor{
- MediaType: component.MediaType,
- Size: component.Size,
- Digest: component.Digest,
- URLs: dupStringSlice(component.URLs),
- },
- Schema2PlatformSpec{
- Architecture: component.Platform.Architecture,
- OS: component.Platform.OS,
- OSVersion: component.Platform.OSVersion,
- OSFeatures: dupStringSlice(component.Platform.OSFeatures),
- Variant: component.Platform.Variant,
- Features: dupStringSlice(component.Platform.Features),
- },
- }
- list.Manifests[i] = m
- }
- return &list
+ return manifest.Schema2ListPublicFromComponents(components)
}
// Schema2ListClone creates a deep copy of the passed-in list.
func Schema2ListClone(list *Schema2List) *Schema2List {
- return Schema2ListFromComponents(list.Manifests)
-}
-
-// ToOCI1Index returns the list encoded as an OCI1 index.
-func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
- components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
- for _, manifest := range list.Manifests {
- converted := imgspecv1.Descriptor{
- MediaType: manifest.MediaType,
- Size: manifest.Size,
- Digest: manifest.Digest,
- URLs: dupStringSlice(manifest.URLs),
- Platform: &imgspecv1.Platform{
- OS: manifest.Platform.OS,
- Architecture: manifest.Platform.Architecture,
- OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
- OSVersion: manifest.Platform.OSVersion,
- Variant: manifest.Platform.Variant,
- },
- }
- components = append(components, converted)
- }
- oci := OCI1IndexFromComponents(components, nil)
- return oci, nil
-}
-
-// ToSchema2List returns the list encoded as a Schema2 list.
-func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
- return Schema2ListClone(list), nil
+ return manifest.Schema2ListPublicClone(list)
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
-func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
- list := Schema2List{
- Manifests: []Schema2ManifestDescriptor{},
- }
- if err := json.Unmarshal(manifest, &list); err != nil {
- return nil, errors.Wrapf(err, "error unmarshaling Schema2List %q", string(manifest))
- }
- return &list, nil
-}
-
-// Clone returns a deep copy of this list and its contents.
-func (list *Schema2List) Clone() List {
- return Schema2ListClone(list)
-}
-
-// ConvertToMIMEType converts the passed-in manifest list to a manifest
-// list of the specified type.
-func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
- switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
- case DockerV2ListMediaType:
- return list.Clone(), nil
- case imgspecv1.MediaTypeImageIndex:
- return list.ToOCI1Index()
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
- return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
- default:
- // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
- }
+func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
+ return manifest.Schema2ListPublicFromManifest(manifestBlob)
}
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
index 58982597e..1d6fdc9f5 100644
--- a/vendor/github.com/containers/image/v5/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -1,10 +1,7 @@
package manifest
import (
- "fmt"
-
- "github.com/containers/image/v5/types"
- digest "github.com/opencontainers/go-digest"
+ "github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -21,56 +18,14 @@ var (
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
-type List interface {
- // MIMEType returns the MIME type of this particular manifest list.
- MIMEType() string
-
- // Instances returns a list of the manifests that this list knows of, other than its own.
- Instances() []digest.Digest
-
- // Update information about the list's instances. The length of the passed-in slice must
- // match the length of the list of instances which the list already contains, and every field
- // must be specified.
- UpdateInstances([]ListUpdate) error
-
- // Instance returns the size and MIME type of a particular instance in the list.
- Instance(digest.Digest) (ListUpdate, error)
-
- // ChooseInstance selects which manifest is most appropriate for the platform described by the
- // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
- ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
-
- // Serialize returns the list in a blob format.
- // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
- // from, even if no modifications were made!
- Serialize() ([]byte, error)
-
- // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
- ConvertToMIMEType(mimeType string) (List, error)
-
- // Clone returns a deep copy of this list and its contents.
- Clone() List
-}
+type List = manifest.ListPublic
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
-type ListUpdate struct {
- Digest digest.Digest
- Size int64
- MediaType string
-}
+type ListUpdate = manifest.ListUpdate
// ListFromBlob parses a list of manifests.
-func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
- normalized := NormalizedMIMEType(manifestMIMEType)
- switch normalized {
- case DockerV2ListMediaType:
- return Schema2ListFromManifest(manifest)
- case imgspecv1.MediaTypeImageIndex:
- return OCI1IndexFromManifest(manifest)
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
- return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
- }
- return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
+ return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
}
// ConvertListToMIMEType converts the passed-in manifest list to a manifest
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 32680e09d..d8f37eb45 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -1,9 +1,9 @@
package manifest
import (
- "encoding/json"
"fmt"
+ "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@@ -15,25 +15,29 @@ import (
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
- DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
- // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
- DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
- DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
- DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
- DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
- DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+ DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
- DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ DockerV2ListMediaType = manifest.DockerV2ListMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+ DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
)
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
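+//
+// A minimal detection sketch (the err value is assumed to come from an
+// image-specific operation such as Inspect):
+//
+//	var artifactErr NonImageArtifactError
+//	if errors.As(err, &artifactErr) {
+//		// not a container image; handle (or reject) the artifact case
+//	}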
+type NonImageArtifactError = manifest.NonImageArtifactError
+
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
switch m {
@@ -97,105 +101,25 @@ type LayerInfo struct {
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
-func GuessMIMEType(manifest []byte) string {
- // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
- // Also docker/distribution/manifest.Versioned.
- meta := struct {
- MediaType string `json:"mediaType"`
- SchemaVersion int `json:"schemaVersion"`
- Signatures interface{} `json:"signatures"`
- }{}
- if err := json.Unmarshal(manifest, &meta); err != nil {
- return ""
- }
-
- switch meta.MediaType {
- case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
- return meta.MediaType
- }
- // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
- switch meta.SchemaVersion {
- case 1:
- if meta.Signatures != nil {
- return DockerV2Schema1SignedMediaType
- }
- return DockerV2Schema1MediaType
- case 2:
- // best effort to understand if this is an OCI image since mediaType
- // isn't in the manifest for OCI anymore
- // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
- ociMan := struct {
- Config struct {
- MediaType string `json:"mediaType"`
- } `json:"config"`
- }{}
- if err := json.Unmarshal(manifest, &ociMan); err != nil {
- return ""
- }
- switch ociMan.Config.MediaType {
- case imgspecv1.MediaTypeImageConfig:
- return imgspecv1.MediaTypeImageManifest
- case DockerV2Schema2ConfigMediaType:
- // This case should not happen since a Docker image
- // must declare a top-level media type and
- // `meta.MediaType` has already been checked.
- return DockerV2Schema2MediaType
- }
- // Maybe an image index or an OCI artifact.
- ociIndex := struct {
- Manifests []imgspecv1.Descriptor `json:"manifests"`
- }{}
- if err := json.Unmarshal(manifest, &ociIndex); err != nil {
- return ""
- }
- if len(ociIndex.Manifests) != 0 {
- if ociMan.Config.MediaType == "" {
- return imgspecv1.MediaTypeImageIndex
- }
- // FIXME: this is mixing media types of manifests and configs.
- return ociMan.Config.MediaType
- }
- // It's most likely an OCI artifact with a custom config media
- // type which is not (and cannot) be covered by the media-type
- // checks cabove.
- return imgspecv1.MediaTypeImageManifest
- }
- return ""
+func GuessMIMEType(manifestBlob []byte) string {
+ return manifest.GuessMIMEType(manifestBlob)
}
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
-func Digest(manifest []byte) (digest.Digest, error) {
- if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
- sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
- if err != nil {
- return "", err
- }
- manifest, err = sig.Payload()
- if err != nil {
- // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
- // that libtrust itself has josebase64UrlEncode()d
- return "", err
- }
- }
-
- return digest.FromBytes(manifest), nil
+func Digest(manifestBlob []byte) (digest.Digest, error) {
+ return manifest.Digest(manifestBlob)
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
-func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
- // This should eventually support various digest types.
- actualDigest, err := Digest(manifest)
- if err != nil {
- return false, err
- }
- return expectedDigest == actualDigest, nil
+func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
+ return manifest.MatchesDigest(manifestBlob, expectedDigest)
}
// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
-// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
key, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
@@ -225,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
- switch input {
- // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
- // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
- // need to happen within the ImageSource.
- case "application/json":
- return DockerV2Schema1SignedMediaType
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
- imgspecv1.MediaTypeImageManifest,
- imgspecv1.MediaTypeImageIndex,
- DockerV2Schema2MediaType,
- DockerV2ListMediaType:
- return input
- default:
- // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
- // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
- // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
- //
- // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
- // This makes no real sense, but it happens
- // because requests for manifests are
- // redirected to a content distribution
- // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
- return DockerV2Schema1SignedMediaType
- }
+ return manifest.NormalizedMIMEType(input)
}
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
@@ -265,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
}
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt)
}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index c6299d8e6..0faa866b7 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -3,15 +3,16 @@ package manifest
import (
"encoding/json"
"fmt"
+ "slices"
"strings"
- "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/internal/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -41,7 +42,12 @@ type OCI1 struct {
// useful for validation anyway.
func SupportedOCI1MediaType(m string) error {
switch m {
- case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
+ case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig,
+ imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd,
+ imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeLayoutHeader,
+ ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
return nil
default:
return fmt.Errorf("unsupported OCIv1 media type: %q", m)
@@ -49,9 +55,13 @@ func SupportedOCI1MediaType(m string) error {
}
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
-func OCI1FromManifest(manifest []byte) (*OCI1, error) {
+func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
oci1 := OCI1{}
- if err := json.Unmarshal(manifest, &oci1); err != nil {
+ if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
+ return nil, err
+ }
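+ // Reject blobs which could also be parsed as a different manifest kind;
+ // for an OCI image manifest only the config and layers fields are expected.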
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
+ manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
return &oci1, nil
@@ -62,6 +72,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript
return &OCI1{
imgspecv1.Manifest{
Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
Config: config,
Layers: layers,
},
@@ -84,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []LayerInfo {
- blobs := []LayerInfo{}
+ blobs := make([]LayerInfo, 0, len(m.Layers))
for _, layer := range m.Layers {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromOCI1Descriptor(layer),
@@ -96,23 +107,29 @@ func (m *OCI1) LayerInfos() []LayerInfo {
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
},
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayer,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
},
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
+//
+// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
}
original := m.Layers
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
@@ -127,7 +144,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
if info.CryptoOperation == types.Encrypt {
encMediaType, err := getEncryptedMediaType(mimeType)
@@ -146,6 +163,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
return nil
}
+// getEncryptedMediaType maps a mediatype to its encrypted counterpart, returning
+// an error if the mediatype does not support encryption.
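+// For example, "application/vnd.oci.image.layer.v1.tar+gzip" maps to
+// "application/vnd.oci.image.layer.v1.tar+gzip+encrypted".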
+func getEncryptedMediaType(mediatype string) (string, error) {
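+ // Look for "encrypted" among the "+"-separated suffixes: a mediatype such as
+ // "foo+gzip+encrypted" is already encrypted and cannot be encrypted again.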
+ if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
+ return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
+ imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
+}
+
+// getDecryptedMediaType maps a mediatype to its decrypted counterpart, returning
+// an error if the mediatype does not support decryption.
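+// For example, "application/vnd.oci.image.layer.v1.tar+gzip+encrypted" maps back to
+// "application/vnd.oci.image.layer.v1.tar+gzip".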
+func getDecryptedMediaType(mediatype string) (string, error) {
+ res, ok := strings.CutSuffix(mediatype, "+encrypted")
+ if !ok {
+ return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
+ }
+
+ return res, nil
+}
+
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
@@ -154,6 +198,14 @@ func (m *OCI1) Serialize() ([]byte, error) {
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
+ // Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+ // and is probably better served by failing; we can always re-visit that later if we fail now, but
+ // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+ return nil, manifest.NewNonImageArtifactError(&m.Manifest)
+ }
+
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
@@ -166,50 +218,58 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
if err := json.Unmarshal(config, d1); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: v1.Created,
DockerVersion: d1.DockerVersion,
Labels: v1.Config.Labels,
Architecture: v1.Architecture,
+ Variant: v1.Variant,
Os: v1.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
Env: v1.Config.Env,
+ Author: v1.Author,
}
return i, nil
}
// ImageID computes an ID which can uniquely identify this image by its contents.
-func (m *OCI1) ImageID([]digest.Digest) (string, error) {
- if err := m.Config.Digest.Validate(); err != nil {
- return "", err
+func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
+ // The way m.Config.Digest “uniquely identifies” an image is
+ // by containing RootFS.DiffIDs, which identify the layers of the image.
+ // For non-image artifacts, we can’t expect the config to change
+ // any time the other layers (semantically) change, so this approach of
+ // distinguishing objects only by m.Config.Digest doesn’t work in general.
+ //
+ // Any caller of this method presumably wants to disambiguate the same
+ // images with a different representation, but doesn’t want to disambiguate
+ // representations (by using a manifest digest). So, submitting a non-image
+ // artifact to such a caller indicates an expectation mismatch.
+ // So, we just fail here instead of inventing some other ID value (e.g.
+ // by combining the config and blob layer digests). That still
+ // gives us the option to not fail, and return some value, in the future,
+ // without committing to that approach now.
+ // (The only known caller of ImageID is storage/storageImageDestination.computeID,
+ // which can’t work with non-image artifacts.)
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return "", manifest.NewNonImageArtifactError(&m.Manifest)
}
- return m.Config.Digest.Hex(), nil
-}
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
- for _, s := range strings.Split(mediatype, "+")[1:] {
- if s == "encrypted" {
- return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
- }
- }
- unsuffixedMediatype := strings.Split(mediatype, "+")[0]
- switch unsuffixedMediatype {
- case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
- return mediatype + "+encrypted", nil
+ if err := m.Config.Digest.Validate(); err != nil {
+ return "", err
}
-
- return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+ return m.Config.Digest.Encoded(), nil
}
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
- if !strings.HasSuffix(mediatype, "+encrypted") {
- return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return false
}
-
- return strings.TrimSuffix(mediatype, "+encrypted"), nil
+ return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index 7bdea8fb2..193b08935 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -1,227 +1,27 @@
package manifest
import (
- "encoding/json"
- "fmt"
- "runtime"
-
- platform "github.com/containers/image/v5/internal/pkg/platform"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspec "github.com/opencontainers/image-spec/specs-go"
+ "github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
// provide methods for.
-type OCI1Index struct {
- imgspecv1.Index
-}
-
-// MIMEType returns the MIME type of this particular manifest index.
-func (index *OCI1Index) MIMEType() string {
- return imgspecv1.MediaTypeImageIndex
-}
-
-// Instances returns a slice of digests of the manifests that this index knows of.
-func (index *OCI1Index) Instances() []digest.Digest {
- results := make([]digest.Digest, len(index.Manifests))
- for i, m := range index.Manifests {
- results[i] = m.Digest
- }
- return results
-}
-
-// Instance returns the ListUpdate of a particular instance in the index.
-func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
- for _, manifest := range index.Manifests {
- if manifest.Digest == instanceDigest {
- return ListUpdate{
- Digest: manifest.Digest,
- Size: manifest.Size,
- MediaType: manifest.MediaType,
- }, nil
- }
- }
- return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
-}
-
-// UpdateInstances updates the sizes, digests, and media types of the manifests
-// which the list catalogs.
-func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
- if len(updates) != len(index.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
- }
- for i := range updates {
- if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
- }
- index.Manifests[i].Digest = updates[i].Digest
- if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
- }
- index.Manifests[i].Size = updates[i].Size
- if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
- }
- index.Manifests[i].MediaType = updates[i].MediaType
- }
- return nil
-}
-
-// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
-// of the image which is appropriate for the current environment.
-func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
- wantedPlatforms, err := platform.WantedPlatforms(ctx)
- if err != nil {
- return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
- }
- for _, wantedPlatform := range wantedPlatforms {
- for _, d := range index.Manifests {
- if d.Platform == nil {
- continue
- }
- imagePlatform := imgspecv1.Platform{
- Architecture: d.Platform.Architecture,
- OS: d.Platform.OS,
- OSVersion: d.Platform.OSVersion,
- OSFeatures: dupStringSlice(d.Platform.OSFeatures),
- Variant: d.Platform.Variant,
- }
- if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
- return d.Digest, nil
- }
- }
- }
-
- for _, d := range index.Manifests {
- if d.Platform == nil {
- return d.Digest, nil
- }
- }
- return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
-}
-
-// Serialize returns the index in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (index *OCI1Index) Serialize() ([]byte, error) {
- buf, err := json.Marshal(index)
- if err != nil {
- return nil, errors.Wrapf(err, "error marshaling OCI1Index %#v", index)
- }
- return buf, nil
-}
+type OCI1Index = manifest.OCI1IndexPublic
// OCI1IndexFromComponents creates an OCI1 image index instance from the
// supplied data.
func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
- index := OCI1Index{
- imgspecv1.Index{
- Versioned: imgspec.Versioned{SchemaVersion: 2},
- Manifests: make([]imgspecv1.Descriptor, len(components)),
- Annotations: dupStringStringMap(annotations),
- },
- }
- for i, component := range components {
- var platform *imgspecv1.Platform
- if component.Platform != nil {
- platform = &imgspecv1.Platform{
- Architecture: component.Platform.Architecture,
- OS: component.Platform.OS,
- OSVersion: component.Platform.OSVersion,
- OSFeatures: dupStringSlice(component.Platform.OSFeatures),
- Variant: component.Platform.Variant,
- }
- }
- m := imgspecv1.Descriptor{
- MediaType: component.MediaType,
- Size: component.Size,
- Digest: component.Digest,
- URLs: dupStringSlice(component.URLs),
- Annotations: dupStringStringMap(component.Annotations),
- Platform: platform,
- }
- index.Manifests[i] = m
- }
- return &index
+ return manifest.OCI1IndexPublicFromComponents(components, annotations)
}
// OCI1IndexClone creates a deep copy of the passed-in index.
func OCI1IndexClone(index *OCI1Index) *OCI1Index {
- return OCI1IndexFromComponents(index.Manifests, index.Annotations)
-}
-
-// ToOCI1Index returns the index encoded as an OCI1 index.
-func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
- return OCI1IndexClone(index), nil
-}
-
-// ToSchema2List returns the index encoded as a Schema2 list.
-func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
- components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
- for _, manifest := range index.Manifests {
- platform := manifest.Platform
- if platform == nil {
- platform = &imgspecv1.Platform{
- OS: runtime.GOOS,
- Architecture: runtime.GOARCH,
- }
- }
- converted := Schema2ManifestDescriptor{
- Schema2Descriptor{
- MediaType: manifest.MediaType,
- Size: manifest.Size,
- Digest: manifest.Digest,
- URLs: dupStringSlice(manifest.URLs),
- },
- Schema2PlatformSpec{
- OS: platform.OS,
- Architecture: platform.Architecture,
- OSFeatures: dupStringSlice(platform.OSFeatures),
- OSVersion: platform.OSVersion,
- Variant: platform.Variant,
- },
- }
- components = append(components, converted)
- }
- s2 := Schema2ListFromComponents(components)
- return s2, nil
+ return manifest.OCI1IndexPublicClone(index)
}
// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest index.
-func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
- index := OCI1Index{
- Index: imgspecv1.Index{
- Versioned: imgspec.Versioned{SchemaVersion: 2},
- Manifests: []imgspecv1.Descriptor{},
- Annotations: make(map[string]string),
- },
- }
- if err := json.Unmarshal(manifest, &index); err != nil {
- return nil, errors.Wrapf(err, "error unmarshaling OCI1Index %q", string(manifest))
- }
- return &index, nil
-}
-
-// Clone returns a deep copy of this list and its contents.
-func (index *OCI1Index) Clone() List {
- return OCI1IndexClone(index)
-}
-
-// ConvertToMIMEType converts the passed-in image index to a manifest list of
-// the specified type.
-func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
- switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
- case DockerV2ListMediaType:
- return index.ToSchema2List()
- case imgspecv1.MediaTypeImageIndex:
- return index.Clone(), nil
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
- return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
- default:
- // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
- }
+func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
+ return manifest.OCI1IndexPublicFromManifest(manifestBlob)
}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index c874eb775..1d1e26c84 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -2,38 +2,49 @@ package archive
import (
"context"
+ "fmt"
"io"
"os"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type ociArchiveImageDestination struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedDest types.ImageDestination
+ unpackedDest private.ImageDestination
tempDirRef tempDirOCIRef
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) {
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
- return nil, errors.Wrapf(err, "error creating oci reference")
+ return nil, fmt.Errorf("creating oci reference: %w", err)
}
unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
if err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageDestination{ref: ref,
- unpackedDest: unpackedDest,
- tempDirRef: tempDirRef}, nil
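+ // imagedestination.FromPublic adapts the public ImageDestination to the richer
+ // private interface; impl.AddCompat then provides the legacy public methods in
+ // terms of the private ones, so this type satisfies both.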
+ d := &ociArchiveImageDestination{
+ ref: ref,
+ unpackedDest: imagedestination.FromPublic(unpackedDest),
+ tempDirRef: tempDirRef,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination.
@@ -87,29 +98,39 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
return false
}
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
+ return d.unpackedDest.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
+ return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
}
// PutManifest writes the manifest to the destination.
@@ -121,18 +142,21 @@ func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte,
return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted
-// after the directory is made, it is tarred up into a file and the directory is deleted
-func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
- return errors.Wrapf(err, "error storing image %q", d.ref.image)
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
+}
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ if err := d.unpackedDest.CommitWithOptions(ctx, options); err != nil {
+ return fmt.Errorf("storing image %q: %w", d.ref.image, err)
}
// path of directory to tar up
@@ -145,15 +169,20 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve
// tar converts the directory at src and saves it to dst
func tarDirectory(src, dst string) error {
// input is a stream of bytes from the archive of the directory at path
- input, err := archive.Tar(src, archive.Uncompressed)
+ input, err := archive.TarWithOptions(src, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ // Don’t include the data about the user account this code is running under.
+ ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
+ })
if err != nil {
- return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+ return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
}
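+ // ensure the tar stream is closed on every return path below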
+ defer input.Close()
// creates the tar file
outFile, err := os.Create(dst)
if err != nil {
- return errors.Wrapf(err, "error creating tar file %q", dst)
+ return fmt.Errorf("creating tar file %q: %w", dst, err)
}
defer outFile.Close()
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
index 8f07b3307..d68814a17 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -2,40 +2,78 @@ package archive
import (
"context"
+ "errors"
+ "fmt"
"io"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image†part of the provided reference.
+type ImageNotFoundError struct {
+ ref ociArchiveReference
+ // We may make members public, or add methods, in the future.
+}
+
+func (e ImageNotFoundError) Error() string {
+ return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
+}
+
+// ArchiveFileNotFoundError occurs when the archive file does not exist.
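+// Callers can detect it with errors.As, mirroring the ImageNotFoundError handling
+// in newImageSource below (a sketch):
+//
+//	var notFound ArchiveFileNotFoundError
+//	if errors.As(err, &notFound) {
+//		// the archive file itself is missing
+//	}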
+type ArchiveFileNotFoundError struct {
+ // ref is the image reference
+ ref ociArchiveReference
+ // path is the file path that was not present
+ path string
+}
+
+func (e ArchiveFileNotFoundError) Error() string {
+ return fmt.Sprintf("archive file not found: %q", e.path)
+}
+
type ociArchiveImageSource struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedSrc types.ImageSource
+ unpackedSrc private.ImageSource
tempDirRef tempDirOCIRef
}
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource untars the file and saves it in a temp directory
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) {
tempDirRef, err := createUntarTempDir(sys, ref)
if err != nil {
- return nil, errors.Wrap(err, "error creating temp directory")
+ return nil, fmt.Errorf("creating temp directory: %w", err)
}
unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
if err != nil {
+ var notFound ocilayout.ImageNotFoundError
+ if errors.As(err, ¬Found) {
+ err = ImageNotFoundError{ref: ref}
+ }
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageSource{ref: ref,
- unpackedSrc: unpackedSrc,
- tempDirRef: tempDirRef}, nil
+ s := &ociArchiveImageSource{
+ ref: ref,
+ unpackedSrc: imagesource.FromPublic(unpackedSrc),
+ tempDirRef: tempDirRef,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// LoadManifestDescriptor loads the manifest
@@ -48,11 +86,11 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociArchRef, ok := imgRef.(ociArchiveReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference")
}
tempDirRef, err := createUntarTempDir(sys, ociArchRef)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
+ return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err)
}
defer func() {
err := tempDirRef.deleteTempDir()
@@ -61,7 +99,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im
descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index")
+ return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err)
}
return descriptor, nil
}
@@ -101,12 +139,28 @@ func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo
return s.unpackedSrc.GetBlob(ctx, info, cache)
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
+ return s.unpackedSrc.SupportsGetBlobAt()
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
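+// For example, a single chunk {Offset: 0, Length: math.MaxUint64} requests the
+// entire blob.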
+func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest)
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index c808539d2..d5fee3631 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -2,21 +2,21 @@ package archive
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
+ "io/fs"
"os"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/oci/internal"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
func init() {
@@ -123,11 +123,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -144,7 +140,7 @@ func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *typ
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
+ return errors.New("Deleting images not implemented for oci: images")
}
// struct to store the ociReference and temporary directory returned by createOCIRef
@@ -161,9 +157,9 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the oci reference of the image
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
- dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+ dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
if err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+ return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
}
ociRef, err := ocilayout.NewReference(dir, image)
if err != nil {
@@ -176,23 +172,29 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)
// creates the temporary directory and copies the tarred content to it
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
- tempDirRef, err := createOCIRef(sys, ref.image)
- if err != nil {
- return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
- }
src := ref.resolvedFile
- dst := tempDirRef.tempDirectory
- // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
arch, err := os.Open(src)
if err != nil {
- return tempDirOCIRef{}, err
+ if errors.Is(err, fs.ErrNotExist) {
+ return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src}
+ } else {
+ return tempDirOCIRef{}, err
+ }
}
defer arch.Close()
+
+ tempDirRef, err := createOCIRef(sys, ref.image)
+ if err != nil {
+ return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
+ }
+ dst := tempDirRef.tempDirectory
+
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
- return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err)
}
return tempDirRef, nil
}
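
The reordered hunk above also introduces the errors.Is(err, fs.ErrNotExist) check that maps a missing archive file to the typed ArchiveFileNotFoundError. The general pattern, sketched standalone with a hypothetical NotFoundError (the real type carries the reference and path):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    type NotFoundError struct{ Path string }

    func (e NotFoundError) Error() string { return fmt.Sprintf("file %q not found", e.Path) }

    func open(path string) (*os.File, error) {
        f, err := os.Open(path)
        if err != nil {
            if errors.Is(err, fs.ErrNotExist) {
                return nil, NotFoundError{Path: path} // surface a matchable typed error
            }
            return nil, err
        }
        return f, nil
    }

    func main() {
        _, err := open("/no/such/archive.tar")
        var nf NotFoundError
        if errors.As(err, &nf) {
            fmt.Println("typed match:", nf)
        }
    }
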
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
index c2012e50e..53827b11a 100644
--- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -1,7 +1,8 @@
package internal
import (
- "github.com/pkg/errors"
+ "errors"
+ "fmt"
"path/filepath"
"regexp"
"runtime"
@@ -27,7 +28,7 @@ func ValidateImageName(image string) error {
var err error
if !refRegexp.MatchString(image) {
- err = errors.Errorf("Invalid image %s", image)
+ err = fmt.Errorf("Invalid image %s", image)
}
return err
}
@@ -57,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) {
}
func splitPathAndImageNonWindows(reference string) (string, string) {
- sep := strings.SplitN(reference, ":", 2)
- path := sep[0]
-
- var image string
- if len(sep) == 2 {
- image = sep[1]
- }
+ path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
return path, image
}
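
strings.Cut (Go 1.18+) replaces the SplitN-and-check dance with a single call that returns the text before and after the first separator, plus whether the separator was found. A quick demonstration of the two cases the old code handled explicitly:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        path, image, found := strings.Cut("/tmp/layout:busybox", ":")
        fmt.Printf("%q %q %v\n", path, image, found) // "/tmp/layout" "busybox" true

        path, image, found = strings.Cut("/tmp/layout", ":")
        fmt.Printf("%q %q %v\n", path, image, found) // "/tmp/layout" "" false (image stays empty)
    }
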
@@ -72,11 +67,11 @@ func ValidateOCIPath(path string) error {
if runtime.GOOS == "windows" {
// On Windows we must allow for a ':' as part of the path
if strings.Count(path, ":") > 1 {
- return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
}
} else {
if strings.Contains(path, ":") {
- return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
}
}
return nil
@@ -96,7 +91,7 @@ func ValidateScope(scope string) error {
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
@@ -105,7 +100,7 @@ func ValidateScope(scope string) error {
func validateScopeWindows(scope string) error {
matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
if !matched {
- return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
}
return nil
@@ -113,7 +108,7 @@ func validateScopeWindows(scope string) error {
func validateScopeNonWindows(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
new file mode 100644
index 000000000..08366a7e2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
@@ -0,0 +1,183 @@
+package layout
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/fs"
+ "os"
+ "slices"
+
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// DeleteImage deletes the named image from the directory, if supported.
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ sharedBlobsDir := ""
+ if sys != nil && sys.OCISharedBlobDirPath != "" {
+ sharedBlobsDir = sys.OCISharedBlobDirPath
+ }
+
+ descriptor, descriptorIndex, err := ref.getManifestDescriptor()
+ if err != nil {
+ return err
+ }
+
+ blobsUsedByImage := make(map[digest.Digest]int)
+ if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
+ return err
+ }
+
+ blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+
+ err = ref.deleteBlobs(blobsToDelete)
+ if err != nil {
+ return err
+ }
+
+ return ref.deleteReferenceFromIndex(descriptorIndex)
+}
+
+// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself.
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
+ blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+
+ dest[descriptor.Digest]++
+ switch descriptor.MediaType {
+ case imgspecv1.MediaTypeImageManifest:
+ manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+ if err != nil {
+ return err
+ }
+ dest[manifest.Config.Digest]++
+ for _, layer := range manifest.Layers {
+ dest[layer.Digest]++
+ }
+ case imgspecv1.MediaTypeImageIndex:
+ index, err := parseIndex(blobPath)
+ if err != nil {
+ return err
+ }
+ if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+ }
+ return nil
+}
+
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+ for _, descriptor := range index.Manifests {
+ if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// getBlobsToDelete takes a map of digests and their usage counts in the manifest to be deleted.
+// It compares them to the digest usage in the root index, and returns the set of blobs that can safely be deleted.
+func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
+ rootIndex, err := ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ blobsUsedInRootIndex := make(map[digest.Digest]int)
+ err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+
+ blobsToDelete := set.New[digest.Digest]()
+
+ for digest, count := range blobsUsedInRootIndex {
+ if count-blobsUsedByDescriptorToDelete[digest] == 0 {
+ blobsToDelete.Add(digest)
+ }
+ }
+
+ return blobsToDelete, nil
+}
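
The deletion logic above counts blob usage twice: once for the image being deleted, and once for everything reachable from the root index. A blob is deletable only when all of its uses belong to the deleted image. A minimal sketch of that rule with plain maps (the real code uses digest.Digest keys and an internal set type):

    package main

    import "fmt"

    // blobsToDelete keeps a digest only when every use counted in the root
    // index is accounted for by the image being deleted.
    func blobsToDelete(usedByImage, usedInRootIndex map[string]int) map[string]struct{} {
        deletable := map[string]struct{}{}
        for digest, rootCount := range usedInRootIndex {
            if rootCount-usedByImage[digest] == 0 {
                deletable[digest] = struct{}{}
            }
        }
        return deletable
    }

    func main() {
        root := map[string]int{"sha256:aaa": 2, "sha256:bbb": 1}
        image := map[string]int{"sha256:aaa": 1, "sha256:bbb": 1}
        // sha256:aaa is shared with another manifest, so only sha256:bbb is safe.
        fmt.Println(blobsToDelete(image, root))
    }
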
+
+// This transport never generates layouts where blobs for an image are both in the local blobs directory
+// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
+//
+// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
+// the other layouts sharing that directory are, and we might not even have permission to read them),
+// so we can’t really delete any blobs in that case.
+// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
+// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
+// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
+//
+// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
+func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
+ for _, digest := range blobsToDelete.Values() {
+ blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above
+ if err != nil {
+ return err
+ }
+ err = deleteBlob(blobPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func deleteBlob(blobPath string) error {
+ logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath))
+
+ err := os.Remove(blobPath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ } else {
+ return nil
+ }
+}
+
+func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
+ index, err := ref.getIndex()
+ if err != nil {
+ return err
+ }
+
+ index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1)
+
+ return saveJSON(ref.indexPath(), index)
+}
+
+func saveJSON(path string, content any) error {
+ // If the file already exists, get its mode to preserve it
+ var mode fs.FileMode
+ existingfi, err := os.Stat(path)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ } else { // File does not exist, use default mode
+ mode = 0644
+ }
+ } else {
+ mode = existingfi.Mode()
+ }
+
+ file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return json.NewEncoder(file).Encode(content)
+}
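
deleteReferenceFromIndex drops exactly one descriptor by position using slices.Delete from the Go 1.21 standard library. Its semantics in isolation:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        manifests := []string{"a", "b", "c"}
        // slices.Delete(s, i, j) removes s[i:j] in place and returns the shortened slice.
        manifests = slices.Delete(manifests, 1, 2)
        fmt.Println(manifests) // [a c]
    }
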
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 1230e8ca3..85374cecf 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -3,29 +3,40 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
+ "slices"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type ociImageDestination struct {
- ref ociReference
- index imgspecv1.Index
- sharedBlobDir string
- acceptUncompressedLayers bool
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ ref ociReference
+ index imgspecv1.Index
+ sharedBlobDir string
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
var index *imgspecv1.Index
if indexExists(ref) {
var err error
@@ -42,10 +53,32 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
}
}
- d := &ociImageDestination{ref: ref, index: *index}
+ desiredLayerCompression := types.Compress
+ if sys != nil && sys.OCIAcceptUncompressedLayers {
+ desiredLayerCompression = types.PreserveOriginal
+ }
+
+ d := &ociImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ },
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: true,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+ NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"),
+
+ ref: ref,
+ index: *index,
+ }
+ d.Compat = impl.AddCompat(d)
if sys != nil {
d.sharedBlobDir = sys.OCISharedBlobDirPath
- d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers
}
if err := ensureDirectoryExists(d.ref.dir); err != nil {
@@ -54,7 +87,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
// Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
// but it MAY be empty (e.g. if we never end up calling PutBlob)
// https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
- if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil {
+ if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil {
return nil, err
}
return d, nil
@@ -71,61 +104,17 @@ func (d *ociImageDestination) Close() error {
return nil
}
-func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- imgspecv1.MediaTypeImageManifest,
- imgspecv1.MediaTypeImageIndex,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
-}
-
-func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.acceptUncompressedLayers {
- return types.PreserveOriginal
- }
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ociImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ociImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
succeeded := false
explicitClosed := false
@@ -138,20 +127,18 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -160,54 +147,53 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
}
- blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
+ blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
if err := ensureParentDirectoryExists(blobPath); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
succeeded = true
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
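
The TeeReader-based hashing is replaced by the internal putblobdigest helper, which skips re-hashing when the caller already supplied a digest in the canonical algorithm. A simplified version of that idea using only the public go-digest API; copyWithDigest is a hypothetical helper, not the library's:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"

        "github.com/opencontainers/go-digest"
    )

    func copyWithDigest(dst io.Writer, src io.Reader, known digest.Digest) (digest.Digest, int64, error) {
        if known != "" && known.Algorithm() == digest.Canonical {
            n, err := io.Copy(dst, src) // trust the provided digest, skip hashing
            return known, n, err
        }
        digester := digest.Canonical.Digester()
        n, err := io.Copy(dst, io.TeeReader(src, digester.Hash())) // hash while copying
        return digester.Digest(), n, err
    }

    func main() {
        d, n, err := copyWithDigest(os.Stdout, strings.NewReader("hello\n"), "")
        fmt.Fprintf(os.Stderr, "digest=%s size=%d err=%v\n", d, n, err)
    }
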
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ return false, private.ReusedBlob{}, nil
+ }
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, types.BlobInfo{}, nil
+ return false, private.ReusedBlob{}, nil
}
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
- return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+ return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
@@ -239,7 +225,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
if err := ensureParentDirectoryExists(blobPath); err != nil {
return err
}
- if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+ if err := os.WriteFile(blobPath, m, 0644); err != nil {
return err
}
@@ -288,37 +274,33 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
return
}
}
- // It's a new entry to be added to the index.
- d.index.Manifests = append(d.index.Manifests, *desc)
+ // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created.
+ d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
}
-// PutSignatures would add the given signatures to the oci layout (currently not supported).
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
-// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if len(signatures) != 0 {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
- }
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
- if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
+ Version: imgspecv1.ImageLayoutVersion,
+ })
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil {
return err
}
indexJSON, err := json.Marshal(d.index)
if err != nil {
return err
}
- return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+ return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
}
func ensureDirectoryExists(path string) error {
- if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
@@ -334,7 +316,7 @@ func ensureParentDirectoryExists(path string) error {
// indexExists checks whether the index location specified in the OCI reference exists.
// The implementation is opinionated: in case of unexpected errors, false is returned
func indexExists(ref ociReference) bool {
- _, err := os.Stat(ref.indexPath())
+ err := fileutils.Exists(ref.indexPath())
if err == nil {
return true
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 9925aeda7..adfd20644 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -2,22 +2,43 @@ package layout
import (
"context"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"net/http"
+ "net/url"
"os"
"strconv"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image” part of the provided reference.
+type ImageNotFoundError struct {
+ ref ociReference
+ // We may make members public, or add methods, in the future.
+}
+
+func (e ImageNotFoundError) Error() string {
+ return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
+}
+
type ociImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref ociReference
index *imgspecv1.Index
descriptor imgspecv1.Descriptor
@@ -26,7 +47,7 @@ type ociImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) {
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = tlsconfig.ServerDefault()
@@ -39,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour
client := &http.Client{}
client.Transport = tr
- descriptor, err := ref.getManifestDescriptor()
+ descriptor, _, err := ref.getManifestDescriptor()
if err != nil {
return nil, err
}
@@ -47,12 +68,23 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour
if err != nil {
return nil, err
}
- d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+ s := &ociImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ index: index,
+ descriptor: descriptor,
+ client: client,
+ }
if sys != nil {
// TODO(jonboulle): check dir existence?
- d.sharedBlobDir = sys.OCISharedBlobDirPath
+ s.sharedBlobDir = sys.OCISharedBlobDirPath
}
- return d, nil
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -62,6 +94,7 @@ func (s *ociImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ociImageSource) Close() error {
+ s.client.CloseIdleConnections()
return nil
}
@@ -75,7 +108,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
var err error
if instanceDigest == nil {
- dig = digest.Digest(s.descriptor.Digest)
+ dig = s.descriptor.Digest
mimeType = s.descriptor.MediaType
} else {
dig = *instanceDigest
@@ -92,7 +125,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return nil, "", err
}
- m, err := ioutil.ReadFile(manifestPath)
+ m, err := os.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
@@ -103,17 +136,17 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ociImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
- return s.getExternalBlob(ctx, info.URLs)
+ r, s, err := s.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
}
path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
@@ -132,58 +165,48 @@ func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return [][]byte{}, nil
-}
-
+// getExternalBlob returns a reader for the first available blob URL from urls, which must not be empty.
+// It can return a nil reader when none of the URLs is supported by this function; in that case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
errWrap := errors.New("failed fetching external blob from all urls")
- for _, url := range urls {
-
- req, err := http.NewRequest("GET", url, nil)
+ hasSupportedURL := false
+ for _, u := range urls {
+ if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+ continue // unsupported url. skip this url.
+ }
+ hasSupportedURL = true
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+ errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
continue
}
- resp, err := s.client.Do(req.WithContext(ctx))
+ resp, err := s.client.Do(req)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+ errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
continue
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
- errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
+ errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap)
continue
}
return resp.Body, getBlobSize(resp), nil
}
+ if !hasSupportedURL {
+ return nil, 0, nil // fallback to non-external blob
+ }
return nil, 0, errWrap
}
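
The rewritten loop above only issues requests for http and https URLs, and otherwise signals the caller to fall back to the non-external blob. The scheme filter in isolation; supported is a hypothetical helper:

    package main

    import (
        "fmt"
        "net/url"
    )

    func supported(raw string) bool {
        u, err := url.Parse(raw)
        return err == nil && (u.Scheme == "http" || u.Scheme == "https")
    }

    func main() {
        for _, raw := range []string{"https://example.com/blob", "ftp://example.com/blob", "::bad::"} {
            fmt.Printf("%-30q supported=%v\n", raw, supported(raw))
        }
    }
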
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index a99b63158..816dfa7a1 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -3,6 +3,7 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -10,13 +11,12 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/oci/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
func init() {
@@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ociReference) StringWithinTransport() string {
return fmt.Sprintf("%s:%s", ref.dir, ref.image)
@@ -154,64 +154,62 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index,
// nil is returned together with the error.
func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
- indexJSON, err := os.Open(ref.indexPath())
+ return parseIndex(ref.indexPath())
+}
+
+func parseIndex(path string) (*imgspecv1.Index, error) {
+ return parseJSON[imgspecv1.Index](path)
+}
+
+func parseJSON[T any](path string) (*T, error) {
+ content, err := os.Open(path)
if err != nil {
return nil, err
}
- defer indexJSON.Close()
+ defer content.Close()
- index := &imgspecv1.Index{}
- if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
+ obj := new(T)
+ if err := json.NewDecoder(content).Decode(obj); err != nil {
return nil, err
}
- return index, nil
+ return obj, nil
}
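
parseJSON is a small application of Go generics (1.18+): one decode helper now serves both imgspecv1.Index and imgspecv1.Manifest. A self-contained version of the same shape, reading from an io.Reader instead of a path for brevity:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    // parseJSON decodes a JSON document into a freshly allocated T.
    func parseJSON[T any](r io.Reader) (*T, error) {
        obj := new(T)
        if err := json.NewDecoder(r).Decode(obj); err != nil {
            return nil, err
        }
        return obj, nil
    }

    type index struct {
        SchemaVersion int `json:"schemaVersion"`
    }

    func main() {
        idx, err := parseJSON[index](strings.NewReader(`{"schemaVersion": 2}`))
        if err != nil {
            panic(err)
        }
        fmt.Println(idx.SchemaVersion) // 2
    }
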
-func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) {
index, err := ref.getIndex()
if err != nil {
- return imgspecv1.Descriptor{}, err
+ return imgspecv1.Descriptor{}, -1, err
}
- var d *imgspecv1.Descriptor
if ref.image == "" {
// return manifest if only one image is in the oci directory
- if len(index.Manifests) == 1 {
- d = &index.Manifests[0]
- } else {
+ if len(index.Manifests) != 1 {
// ask user to choose image when more than one image in the oci directory
- return imgspecv1.Descriptor{}, ErrMoreThanOneImage
+ return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
}
+ return index.Manifests[0], 0, nil
} else {
// if image specified, look through all manifests for a match
- for _, md := range index.Manifests {
- if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex {
- continue
- }
- refName, ok := md.Annotations[imgspecv1.AnnotationRefName]
- if !ok {
- continue
- }
- if refName == ref.image {
- d = &md
- break
+ var unsupportedMIMETypes []string
+ for i, md := range index.Manifests {
+ if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
+ if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
+ return md, i, nil
+ }
+ unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
}
}
+ if len(unsupportedMIMETypes) != 0 {
+ return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
+ }
}
- if d == nil {
- return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
- }
- return *d, nil
+ return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
}
// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
@@ -219,9 +217,10 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociRef, ok := imgRef.(ociReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
}
- return ociRef.getManifestDescriptor()
+ md, _, err := ociRef.getManifestDescriptor()
+ return md, err
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -236,29 +235,26 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
return newImageDestination(sys, ref)
}
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
-}
-
// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
func (ref ociReference) ociLayoutPath() string {
- return filepath.Join(ref.dir, "oci-layout")
+ return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
}
// indexPath returns a path for the index.json within a directory using OCI conventions.
func (ref ociReference) indexPath() string {
- return filepath.Join(ref.dir, "index.json")
+ return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
}
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
if err := digest.Validate(); err != nil {
- return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+ return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
}
- blobDir := filepath.Join(ref.dir, "blobs")
+ var blobDir string
if sharedBlobDir != "" {
blobDir = sharedBlobDir
+ } else {
+ blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
- return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+ return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
}
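
Blob paths follow the OCI layout convention blobs/<algorithm>/<encoded>, and the switch from the deprecated digest.Hex() to digest.Encoded() is purely cosmetic for sha256. The go-digest API exposes both path components directly:

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        d := digest.FromString("hello")
        // prints blobs/sha256/2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
        fmt.Println(filepath.Join("blobs", d.Algorithm().String(), d.Encoded()))
    }
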
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index ec88b4ebf..cef3dcccf 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -3,25 +3,26 @@ package openshift
import (
"crypto/tls"
"crypto/x509"
- "encoding/json"
+ "encoding/base64"
+ "errors"
"fmt"
- "io/ioutil"
"net"
"net/http"
+ "net/netip"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
+ "slices"
"strings"
"time"
+ "dario.cat/mergo"
+ "github.com/containers/image/v5/internal/multierr"
"github.com/containers/storage/pkg/homedir"
- "github.com/ghodss/yaml"
- "github.com/imdario/mergo"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "golang.org/x/net/http2"
+ "gopkg.in/yaml.v3"
)
// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
@@ -206,15 +207,12 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if isConfigTransportTLS(*clientConfig) {
var err error
// REMOVED: Support for interactive fallback.
- userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
- if err != nil {
- return nil, err
- }
+ userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
return nil, err
}
- serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+ serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
if err != nil {
return nil, err
}
@@ -233,7 +231,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
mergedConfig := &restConfig{}
// configClusterInfo holds the information identify the server provided by .kubeconfig
@@ -256,7 +254,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
mergedConfig := &restConfig{}
// blindly overwrite existing values based on precedence
@@ -275,11 +273,11 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest
}
// REMOVED: prompting for missing information.
- return mergedConfig, nil
+ return mergedConfig
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config,
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config *directClientConfig) ConfirmUsable() error {
var validationErrors []error
@@ -333,7 +331,7 @@ var (
errEmptyCluster = errors.New("cluster has no server defined")
)
-//helper for checking certificate/key/CA
+// helper for checking certificate/key/CA
func validateFileIsReadable(name string) error {
answer, err := os.Open(name)
defer func() {
@@ -355,19 +353,19 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
+ validationErrors = append(validationErrors, errors.New("default cluster has no server defined"))
} else {
- validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err))
}
}
@@ -391,34 +389,34 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
}
// Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
}
// Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
}
if len(authInfo.ClientCertificate) != 0 {
err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err))
}
}
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
- validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
}
return validationErrors
@@ -462,12 +460,6 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
return mergedClusterInfo
}
-// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
-// This helper implements the error and Errors interfaces. Keeping it private
-// prevents people from making an aggregate of 0 errors, which is not
-// an error, but does satisfy the error interface.
-type aggregateErr []error
-
// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
// NewAggregate converts a slice of errors into an Aggregate interface, which
// is itself an implementation of the error interface. If the slice is empty,
@@ -488,29 +480,9 @@ func newAggregate(errlist []error) error {
if len(errs) == 0 {
return nil
}
- return aggregateErr(errs)
+ return multierr.Format("[", ", ", "]", errs)
}
-// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
-// Error is part of the error interface.
-func (agg aggregateErr) Error() string {
- if len(agg) == 0 {
- // This should never happen, really.
- return ""
- }
- if len(agg) == 1 {
- return agg[0].Error()
- }
- result := fmt.Sprintf("[%s", agg[0].Error())
- for i := 1; i < len(agg); i++ {
- result += fmt.Sprintf(", %s", agg[i].Error())
- }
- result += "]"
- return result
-}
-
-// REMOVED: aggregateErr.Errors
-
// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
type errConfigurationInvalid []error
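
The hand-rolled aggregateErr type is replaced by an internal multierr.Format helper. For code outside this repository, the standard library's errors.Join (Go 1.20+) covers the same many-errors-as-one need, though it formats with newlines rather than the bracketed list shown above:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        err := errors.Join(
            errors.New("cluster has no server defined"),
            errors.New("client-key must be specified"),
        )
        fmt.Println(err)                  // one member error per line
        fmt.Println(errors.Join() == nil) // joining nothing yields nil, not an empty aggregate
    }
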
@@ -546,8 +518,10 @@ type clientConfigLoadingRules struct {
// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
-// if the ExplicitPath, return the unmerged explicit file
-// Otherwise, return a merged config based on the Precedence slice
+//
+// - if the ExplicitPath, return the unmerged explicit file
+// - Otherwise, return a merged config based on the Precedence slice
+//
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.
@@ -579,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue
}
if err != nil {
- errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
+ errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err))
continue
}
@@ -625,7 +599,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
// LoadFromFile takes a filename and deserializes the contents into Config object
func loadFromFile(filename string) (*clientcmdConfig, error) {
- kubeconfigBytes, err := ioutil.ReadFile(filename)
+ kubeconfigBytes, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
@@ -671,11 +645,7 @@ func load(data []byte) (*clientcmdConfig, error) {
return config, nil
}
// Note: This does absolutely no kind/version checking or conversions.
- data, err := yaml.YAMLToJSON(data)
- if err != nil {
- return nil, err
- }
- if err := json.Unmarshal(data, config); err != nil {
+ if err := yaml.Unmarshal(data, config); err != nil {
return nil, err
}
return config, nil
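
Switching from ghodss/yaml (YAML to JSON, then json.Unmarshal) to gopkg.in/yaml.v3 makes decoding a single step, but it also means struct tags must be spelled yaml: instead of json:, which is why the clientcmdConfig field tags change further below:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    type config struct {
        CurrentContext string `yaml:"current-context"` // yaml.v3 ignores json: tags
    }

    func main() {
        var c config
        if err := yaml.Unmarshal([]byte("current-context: prod\n"), &c); err != nil {
            panic(err)
        }
        fmt.Println(c.CurrentContext) // prod
    }
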
@@ -692,7 +662,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err)
}
if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@@ -705,7 +675,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err)
}
if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@@ -775,7 +745,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" {
- return nil, errors.Errorf("host must be a URL or a host:port pair")
+ return nil, errors.New("host must be a URL or a host:port pair")
}
base := host
hostURL, err := url.Parse(base)
@@ -792,7 +762,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
return nil, err
}
if hostURL.Path != "" && hostURL.Path != "/" {
- return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+ return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
}
}
@@ -862,7 +832,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {
// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 {
- return nil, errors.Errorf("username/password or bearer token may be set, but not both")
+ return nil, errors.New("username/password or bearer token may be set, but not both")
}
return rt, nil
@@ -876,11 +846,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
noProxyEnv := os.Getenv("NO_PROXY")
noProxyRules := strings.Split(noProxyEnv, ",")
- cidrs := []*net.IPNet{}
+ cidrs := []netip.Prefix{}
for _, noProxyRule := range noProxyRules {
- _, cidr, _ := net.ParseCIDR(noProxyRule)
- if cidr != nil {
- cidrs = append(cidrs, cidr)
+ prefix, err := netip.ParsePrefix(noProxyRule)
+ if err == nil {
+ cidrs = append(cidrs, prefix)
}
}
@@ -891,7 +861,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
return func(req *http.Request) (*url.URL, error) {
host := req.URL.Host
// for some urls, the Host is already the host, not the host:port
- if net.ParseIP(host) == nil {
+ if _, err := netip.ParseAddr(host); err != nil {
var err error
host, _, err = net.SplitHostPort(req.URL.Host)
if err != nil {
@@ -899,15 +869,15 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
}
- ip := net.ParseIP(host)
- if ip == nil {
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
return delegate(req)
}
- for _, cidr := range cidrs {
- if cidr.Contains(ip) {
- return nil, nil
- }
+ if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool {
+ return cidr.Contains(ip)
+ }) {
+ return nil, nil
}
return delegate(req)
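`net/netip` (Go 1.18) provides comparable value types for addresses and prefixes, which is what lets the hand-rolled loop collapse into `slices.ContainsFunc` (standard library since Go 1.21). A small sketch of the same membership test:

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	// netip.Prefix is a comparable value type, unlike *net.IPNet, so it
	// composes cleanly with the generic slices helpers.
	cidrs := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("192.168.0.0/16"),
	}
	ip := netip.MustParseAddr("10.1.2.3")
	bypass := slices.ContainsFunc(cidrs, func(p netip.Prefix) bool {
		return p.Contains(ip)
	})
	fmt.Println(bypass) // true: 10.1.2.3 falls inside 10.0.0.0/8
}
```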
@@ -935,14 +905,14 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
- Dial: (&net.Dialer{
+ DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
- }).Dial,
+ }).DialContext,
}
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
- _ = http2.ConfigureTransport(t)
+ t.ForceAttemptHTTP2 = true
}
return t, nil
}
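Two independent modernizations in this hunk: `Transport.Dial` is deprecated in favor of `DialContext`, and `ForceAttemptHTTP2` replaces the external `golang.org/x/net/http2.ConfigureTransport` call for transports that set a custom dialer or TLS config (such transports otherwise opt out of automatic HTTP/2). A sketch of the resulting transport:

```go
package main

import (
	"net"
	"net/http"
	"time"
)

func newTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		// DialContext replaces the deprecated Dial field and lets the
		// dial honor request cancellation via the context.
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout: 10 * time.Second,
		// A Transport with custom dial/TLS settings opts out of automatic
		// HTTP/2; ForceAttemptHTTP2 opts back in without needing
		// golang.org/x/net/http2.ConfigureTransport.
		ForceAttemptHTTP2: true,
	}
}

func main() {
	client := &http.Client{Transport: newTransport()}
	_ = client // use as usual, e.g. client.Get(...)
}
```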
@@ -955,15 +925,13 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
return nil, nil
}
if c.HasCA() && c.Insecure {
- return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+ return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed")
}
if err := loadTLSFiles(c); err != nil {
return nil, err
}
tlsConfig := &tls.Config{
- // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
- MinVersion: tls.VersionTLS10,
InsecureSkipVerify: c.Insecure,
}
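Dropping the explicit `MinVersion` is safe because client-side crypto/tls has defaulted to a TLS 1.2 minimum since Go 1.18, so the old TLS 1.0 floor (a POODLE-era mitigation) now sits below the default. A sketch:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Leaving MinVersion at its zero value delegates to the crypto/tls
	// default, which has been TLS 1.2 for clients since Go 1.18.
	cfg := &tls.Config{}
	fmt.Println(cfg.MinVersion) // 0: use the library default

	// Set MinVersion only to demand a floor above the default:
	strict := &tls.Config{MinVersion: tls.VersionTLS13}
	_ = strict
}
```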
@@ -1013,7 +981,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
return data, nil
}
if len(file) > 0 {
- fileData, err := ioutil.ReadFile(file)
+ fileData, err := os.ReadFile(file)
if err != nil {
return []byte{}, err
}
@@ -1056,20 +1024,20 @@ func (c *restConfig) HasCertAuth() bool {
// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
type clientcmdConfig struct {
// Clusters is a map of referenceable names to cluster configs
- Clusters clustersMap `json:"clusters"`
+ Clusters clustersMap `yaml:"clusters"`
// AuthInfos is a map of referenceable names to user configs
- AuthInfos authInfosMap `json:"users"`
+ AuthInfos authInfosMap `yaml:"users"`
// Contexts is a map of referenceable names to context configs
- Contexts contextsMap `json:"contexts"`
+ Contexts contextsMap `yaml:"contexts"`
// CurrentContext is the name of the context that you would like to use by default
- CurrentContext string `json:"current-context"`
+ CurrentContext string `yaml:"current-context"`
}
type clustersMap map[string]*clientcmdCluster
-func (m *clustersMap) UnmarshalJSON(data []byte) error {
+func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedCluster
- if err := json.Unmarshal(data, &a); err != nil {
+ if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@@ -1081,9 +1049,9 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error {
type authInfosMap map[string]*clientcmdAuthInfo
-func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedAuthInfo
- if err := json.Unmarshal(data, &a); err != nil {
+ if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@@ -1095,9 +1063,9 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error {
type contextsMap map[string]*clientcmdContext
-func (m *contextsMap) UnmarshalJSON(data []byte) error {
+func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedContext
- if err := json.Unmarshal(data, &a); err != nil {
+ if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@@ -1117,19 +1085,32 @@ func clientcmdNewConfig() *clientcmdConfig {
}
}
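With yaml.v3, custom decoding hooks implement `UnmarshalYAML(*yaml.Node)` and use `Node.Decode` where the JSON versions used `json.Unmarshal`. A self-contained sketch of the same re-key-a-named-list pattern the `clustersMap`/`authInfosMap`/`contextsMap` hooks use, with hypothetical `namedItem`/`itemsMap` types standing in for the `v1Named*` ones:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// namedItem mirrors the v1Named* shape: a list entry carrying its own name.
type namedItem struct {
	Name  string `yaml:"name"`
	Value string `yaml:"value"`
}

// itemsMap re-keys that list by name. (Hypothetical type, for illustration.)
type itemsMap map[string]string

func (m *itemsMap) UnmarshalYAML(value *yaml.Node) error {
	var a []namedItem
	// value.Decode plays the role json.Unmarshal used to play: it decodes
	// this YAML node into an ordinary Go value.
	if err := value.Decode(&a); err != nil {
		return err
	}
	*m = itemsMap{}
	for _, e := range a {
		(*m)[e.Name] = e.Value
	}
	return nil
}

func main() {
	data := []byte("items:\n- name: a\n  value: one\n- name: b\n  value: two\n")
	var doc struct {
		Items itemsMap `yaml:"items"`
	}
	if err := yaml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Items["b"]) // two
}
```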
+// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary
+type yamlBinaryAsBase64String []byte
+
+func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error {
+ res := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
+ n, err := base64.StdEncoding.Decode(res, text)
+ if err != nil {
+ return err
+ }
+ *bin = res[:n]
+ return nil
+}
+
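The new type works because the YAML decoder feeds scalar text to `encoding.TextUnmarshaler` implementations, so a plain `!!str` holding base64 lands in a `[]byte`-backed type without needing a `!!binary` tag. A sketch of the round trip (assuming gopkg.in/yaml.v3, as above):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"gopkg.in/yaml.v3"
)

// base64Bytes mimics yamlBinaryAsBase64String: the decoder sees the
// UnmarshalText method (encoding.TextUnmarshaler) and hands it the raw
// scalar, so base64 stored as a string becomes []byte directly.
type base64Bytes []byte

func (b *base64Bytes) UnmarshalText(text []byte) error {
	res := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
	n, err := base64.StdEncoding.Decode(res, text)
	if err != nil {
		return err
	}
	*b = res[:n]
	return nil
}

func main() {
	var doc struct {
		Data base64Bytes `yaml:"certificate-authority-data"`
	}
	// "aGVsbG8=" is base64 for "hello".
	if err := yaml.Unmarshal([]byte("certificate-authority-data: aGVsbG8=\n"), &doc); err != nil {
		panic(err)
	}
	fmt.Println(string(doc.Data)) // hello
}
```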
// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
// Cluster contains information about how to communicate with a kubernetes cluster
type clientcmdCluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Server is the address of the kubernetes cluster (https://hostname:port).
- Server string `json:"server"`
+ Server string `yaml:"server"`
// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
- InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"`
// CertificateAuthority is the path to a cert file for the certificate authority.
- CertificateAuthority string `json:"certificate-authority,omitempty"`
+ CertificateAuthority string `yaml:"certificate-authority,omitempty"`
// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
- CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+ CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"`
}
// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
@@ -1138,19 +1119,19 @@ type clientcmdAuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// ClientCertificate is the path to a client cert file for TLS.
- ClientCertificate string `json:"client-certificate,omitempty"`
+ ClientCertificate string `yaml:"client-certificate,omitempty"`
// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
- ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+ ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"`
// ClientKey is the path to a client key file for TLS.
- ClientKey string `json:"client-key,omitempty"`
+ ClientKey string `yaml:"client-key,omitempty"`
// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
- ClientKeyData []byte `json:"client-key-data,omitempty"`
+ ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"`
// Token is the bearer token for authentication to the kubernetes cluster.
- Token string `json:"token,omitempty"`
+ Token string `yaml:"token,omitempty"`
// Username is the username for basic authentication to the kubernetes cluster.
- Username string `json:"username,omitempty"`
+ Username string `yaml:"username,omitempty"`
// Password is the password for basic authentication to the kubernetes cluster.
- Password string `json:"password,omitempty"`
+ Password string `yaml:"password,omitempty"`
}
// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
@@ -1159,36 +1140,36 @@ type clientcmdContext struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Cluster is the name of the cluster for this context
- Cluster string `json:"cluster"`
+ Cluster string `yaml:"cluster"`
// AuthInfo is the name of the authInfo for this context
- AuthInfo string `json:"user"`
+ AuthInfo string `yaml:"user"`
// Namespace is the default namespace to use on unspecified requests
- Namespace string `json:"namespace,omitempty"`
+ Namespace string `yaml:"namespace,omitempty"`
}
// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
// NamedCluster relates nicknames to cluster information
type v1NamedCluster struct {
// Name is the nickname for this Cluster
- Name string `json:"name"`
+ Name string `yaml:"name"`
// Cluster holds the cluster information
- Cluster clientcmdCluster `json:"cluster"`
+ Cluster clientcmdCluster `yaml:"cluster"`
}
// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
// NamedContext relates nicknames to context information
type v1NamedContext struct {
// Name is the nickname for this Context
- Name string `json:"name"`
+ Name string `yaml:"name"`
// Context holds the context information
- Context clientcmdContext `json:"context"`
+ Context clientcmdContext `yaml:"context"`
}
// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
// NamedAuthInfo relates nicknames to auth information
type v1NamedAuthInfo struct {
// Name is the nickname for this AuthInfo
- Name string `json:"name"`
+ Name string `yaml:"name"`
// AuthInfo holds the auth information
- AuthInfo clientcmdAuthInfo `json:"user"`
+ AuthInfo clientcmdAuthInfo `yaml:"user"`
}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 426046e66..63ca8371e 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -3,22 +3,17 @@ package openshift
import (
"bytes"
"context"
- "crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
- "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -70,20 +65,23 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
}, nil
}
+func (c *openshiftClient) close() {
+ c.httpClient.CloseIdleConnections()
+}
+
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
- url := *c.baseURL
- url.Path = path
+ requestURL := *c.baseURL
+ requestURL.Path = path
var requestBodyReader io.Reader
if requestBody != nil {
logrus.Debugf("Will send body: %s", requestBody)
requestBodyReader = bytes.NewReader(requestBody)
}
- req, err := http.NewRequest(method, url.String(), requestBodyReader)
+ req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), requestBodyReader)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
if len(c.bearerToken) != 0 {
req.Header.Set("Authorization", "Bearer "+c.bearerToken)
@@ -96,7 +94,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
req.Header.Set("Content-Type", "application/json")
}
- logrus.Debugf("%s %s", method, url.String())
+ logrus.Debugf("%s %s", method, requestURL.Redacted())
res, err := c.httpClient.Do(req)
if err != nil {
return nil, err
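`http.NewRequestWithContext` binds the context at construction time, replacing the `NewRequest` + `req.WithContext` pair (the latter shallow-copies the request), and `URL.Redacted` (Go 1.15) masks any userinfo password so the URL is safe to log. A sketch of both:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// NewRequestWithContext attaches the context up front, so there is no
	// second allocation from req.WithContext.
	u, _ := url.Parse("https://user:secret@example.com/oapi/v1")
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		panic(err)
	}
	_ = req

	// URL.Redacted masks the password, making it the safe form to log.
	fmt.Println(u.Redacted()) // https://user:xxxxx@example.com/oapi/v1
}
```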
@@ -127,7 +125,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
if statusValid {
return nil, errors.New(status.Message)
}
- return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
+ return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
}
return body, nil
@@ -137,7 +135,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
- body, err := c.doRequest(ctx, "GET", path, nil)
+ body, err := c.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
@@ -152,365 +150,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
- parts := strings.SplitN(ref, "/", 2)
- if len(parts) != 2 {
- return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
- }
- return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
-}
-
-type openshiftImageSource struct {
- client *openshiftClient
- // Values specific to this image
- sys *types.SystemContext
- // State
- docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
- imageStreamImageName string // Resolved image identifier, or "" if not known yet
-}
-
-// newImageSource creates a new ImageSource for the specified reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageSource{
- client: client,
- sys: sys,
- }, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *openshiftImageSource) Reference() types.ImageReference {
- return s.client.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *openshiftImageSource) Close() error {
- if s.docker != nil {
- err := s.docker.Close()
- s.docker = nil
-
- return err
- }
-
- return nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, "", err
- }
- return s.docker.GetManifest(ctx, instanceDigest)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, 0, err
- }
- return s.docker.GetBlob(ctx, info, cache)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- var imageStreamImageName string
- if instanceDigest == nil {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, err
- }
- imageStreamImageName = s.imageStreamImageName
- } else {
- imageStreamImageName = instanceDigest.String()
- }
- image, err := s.client.getImage(ctx, imageStreamImageName)
- if err != nil {
- return nil, err
- }
- var sigs [][]byte
- for _, sig := range image.Signatures {
- if sig.Type == imageSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
- }
- }
- return sigs, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
-// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
-func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
- if s.docker != nil {
- return nil
- }
-
- // FIXME: validate components per validation.IsValidPathSegmentName?
- path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
- body, err := s.client.doRequest(ctx, "GET", path, nil)
- if err != nil {
- return err
- }
- // Note: This does absolutely no kind/version checking or conversions.
- var is imageStream
- if err := json.Unmarshal(body, &is); err != nil {
- return err
- }
- var te *tagEvent
- for _, tag := range is.Status.Tags {
- if tag.Tag != s.client.ref.dockerReference.Tag() {
- continue
- }
- if len(tag.Items) > 0 {
- te = &tag.Items[0]
- break
- }
- }
- if te == nil {
- return errors.Errorf("No matching tag found")
- }
- logrus.Debugf("tag event %#v", te)
- dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
- if err != nil {
- return err
- }
- logrus.Debugf("Resolved reference %#v", dockerRefString)
- dockerRef, err := docker.ParseReference("//" + dockerRefString)
- if err != nil {
- return err
- }
- d, err := dockerRef.NewImageSource(ctx, s.sys)
- if err != nil {
- return err
- }
- s.docker = d
- s.imageStreamImageName = te.Image
- return nil
-}
-
-type openshiftImageDestination struct {
- client *openshiftClient
- docker types.ImageDestination // The Docker Registry endpoint
- // State
- imageStreamImageName string // "" if not yet known
-}
-
-// newImageDestination creates a new ImageDestination for the specified reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
- // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
- // the manifest digest at this point.
- dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
- dockerRef, err := docker.ParseReference(dockerRefString)
- if err != nil {
- return nil, err
- }
- docker, err := dockerRef.NewImageDestination(ctx, sys)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageDestination{
- client: client,
- docker: docker,
- }, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *openshiftImageDestination) Reference() types.ImageReference {
- return d.client.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() error {
- return d.docker.Close()
-}
-
-func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
- return d.docker.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
- return d.docker.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- if instanceDigest == nil {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
- }
- d.imageStreamImageName = manifestDigest.String()
- }
- return d.docker.PutManifest(ctx, m, instanceDigest)
-}
-
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- var imageStreamName string
- if instanceDigest == nil {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
- }
- imageStreamName = d.imageStreamImageName
- } else {
- imageStreamName = instanceDigest.String()
- }
-
- // Because image signatures are a shared resource in Atomic Registry, the default upload
- // always adds signatures. Eventually we should also allow removing signatures.
-
- if len(signatures) == 0 {
- return nil // No need to even read the old state.
+ _, repo, gotRepo := strings.Cut(ref, "/")
+ if !gotRepo {
+ return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref)
}
-
- image, err := d.client.getImage(ctx, imageStreamName)
- if err != nil {
- return err
- }
- existingSigNames := map[string]struct{}{}
- for _, sig := range image.Signatures {
- existingSigNames[sig.objectMeta.Name] = struct{}{}
- }
-
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range image.Signatures {
- if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
- }
-
- // The API expect us to invent a new unique name. This is racy, but hopefully good enough.
- var signatureName string
- for {
- randBytes := make([]byte, 16)
- n, err := rand.Read(randBytes)
- if err != nil || n != 16 {
- return errors.Wrapf(err, "Error generating random signature len %d", n)
- }
- signatureName = fmt.Sprintf("%s@%032x", imageStreamName, randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
- break
- }
- }
- // Note: This does absolutely no kind/version checking or conversions.
- sig := imageSignature{
- typeMeta: typeMeta{
- Kind: "ImageSignature",
- APIVersion: "v1",
- },
- objectMeta: objectMeta{Name: signatureName},
- Type: imageSignatureTypeAtomic,
- Content: newSig,
- }
- body, err := json.Marshal(sig)
- if err != nil {
- return err
- }
- _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- return d.docker.Commit(ctx, unparsedToplevel)
+ return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
}
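`strings.Cut` (Go 1.18) replaces the `SplitN(s, sep, 2)` + length-check idiom: it splits around the first separator and reports whether one was found. A sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Cut returns the text before and after the first separator, plus a
	// found flag, so no slice-length check is needed.
	before, after, found := strings.Cut("registry.example.com/ns/stream", "/")
	fmt.Println(before, after, found) // registry.example.com ns/stream true

	_, _, found = strings.Cut("no-slash-here", "/")
	fmt.Println(found) // false
}
```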
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
new file mode 100644
index 000000000..61f69e93f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
@@ -0,0 +1,245 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "slices"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+type openshiftImageDestination struct {
+ impl.Compat
+ stubs.AlwaysSupportsSignatures
+
+ client *openshiftClient
+ docker private.ImageDestination // The docker/distribution API endpoint
+ // State
+ imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+ // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+ // the manifest digest at this point.
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+ dockerRef, err := docker.ParseReference(dockerRefString)
+ if err != nil {
+ return nil, err
+ }
+ docker, err := dockerRef.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+
+ d := &openshiftImageDestination{
+ client: client,
+ docker: imagedestination.FromPublic(docker),
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+ return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+ err := d.docker.Close()
+ d.client.close()
+ return err
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+ return d.docker.SupportedManifestMIMETypes()
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
+ return d.docker.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
+ return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ return d.docker.TryReusingBlobWithOptions(ctx, info, options)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
+ }
+ return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if d.imageStreamImageName == "" {
+ return errors.New("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ imageStreamImageName = d.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures.
+
+ if len(signatures) == 0 {
+ return nil // No need to even read the old state.
+ }
+
+ image, err := d.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return err
+ }
+ existingSigNames := set.New[string]()
+ for _, sig := range image.Signatures {
+ existingSigNames.Add(sig.objectMeta.Name)
+ }
+
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
+ return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
+ }) {
+ continue
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
+ if !existingSigNames.Contains(signatureName) {
+ break
+ }
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ sig := imageSignature{
+ typeMeta: typeMeta{
+ Kind: "ImageSignature",
+ APIVersion: "v1",
+ },
+ objectMeta: objectMeta{Name: signatureName},
+ Type: imageSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
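The name-generation loop above relies on `crypto/rand.Read`, which fills the whole slice or returns an error. A self-contained sketch of the same retry-until-unique pattern, with a plain map standing in for the internal `set` package:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

// pickName mirrors the retry loop above: draw 16 random bytes, format a
// "<digest>@<32 hex chars>" name, and retry on the (unlikely) collision.
// The taken map is a stand-in for the names already present on the server.
func pickName(imageName string, taken map[string]struct{}) (string, error) {
	for {
		randBytes := make([]byte, 16)
		// crypto/rand.Read fills the whole slice or returns an error;
		// n == len(b) if and only if err == nil.
		if _, err := rand.Read(randBytes); err != nil {
			return "", fmt.Errorf("generating random signature name: %w", err)
		}
		name := fmt.Sprintf("%s@%032x", imageName, randBytes)
		if _, ok := taken[name]; !ok {
			return name, nil
		}
	}
}

func main() {
	name, err := pickName("sha256:abc", map[string]struct{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
}
```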
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ return d.docker.CommitWithOptions(ctx, options)
+}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
new file mode 100644
index 000000000..62774afbb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
@@ -0,0 +1,177 @@
+package openshift
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type openshiftImageSource struct {
+ impl.Compat
+ impl.DoesNotAffectLayerInfosForCopy
+ // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(),
+ // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we
+ // can change it), but this is a deprecated transport anyway, so for now we just punt.
+ stubs.NoGetBlobAtInitialize
+
+ client *openshiftClient
+ // Values specific to this image
+ sys *types.SystemContext
+ // State
+ docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
+ imageStreamImageName string // Resolved image identifier, or "" if not known yet
+}
+
+// newImageSource creates a new ImageSource for the specified reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &openshiftImageSource{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ client: client,
+ sys: sys,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *openshiftImageSource) Reference() types.ImageReference {
+ return s.client.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *openshiftImageSource) Close() error {
+ var err error
+ if s.docker != nil {
+ err = s.docker.Close()
+ s.docker = nil
+ }
+
+ s.client.close()
+
+ return err
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, 0, err
+ }
+ return s.docker.GetBlob(ctx, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+ imageStreamImageName = s.imageStreamImageName
+ } else {
+ if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
+ return nil, err
+ }
+ imageStreamImageName = instanceDigest.String()
+ }
+ image, err := s.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return nil, err
+ }
+ var sigs []signature.Signature
+ for _, sig := range image.Signatures {
+ if sig.Type == imageSignatureTypeAtomic {
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return sigs, nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+ if s.docker != nil {
+ return nil
+ }
+
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var is imageStream
+ if err := json.Unmarshal(body, &is); err != nil {
+ return err
+ }
+ var te *tagEvent
+ for _, tag := range is.Status.Tags {
+ if tag.Tag != s.client.ref.dockerReference.Tag() {
+ continue
+ }
+ if len(tag.Items) > 0 {
+ te = &tag.Items[0]
+ break
+ }
+ }
+ if te == nil {
+ return errors.New("No matching tag found")
+ }
+ logrus.Debugf("tag event %#v", te)
+ dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Resolved reference %#v", dockerRefString)
+ dockerRef, err := docker.ParseReference("//" + dockerRefString)
+ if err != nil {
+ return err
+ }
+ d, err := dockerRef.NewImageSource(ctx, s.sys)
+ if err != nil {
+ return err
+ }
+ s.docker = d
+ s.imageStreamImageName = te.Image
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
index 6bbb43be2..0ba435d56 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
@@ -2,16 +2,16 @@ package openshift
import (
"context"
+ "errors"
"fmt"
- "regexp"
"strings"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- genericImage "github.com/containers/image/v5/image"
+ genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/regexp"
)
func init() {
@@ -35,7 +35,7 @@ func (t openshiftTransport) ParseReference(reference string) (types.ImageReferen
// Note that imageNameRegexp is namespace/stream:tag, this
// is HOSTNAME/namespace/stream:tag or parent prefixes.
// Keep this in sync with imageNameRegexp!
-var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
+var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
@@ -43,7 +43,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil {
- return errors.Errorf("Invalid scope name %s", scope)
+ return fmt.Errorf("Invalid scope name %s", scope)
}
return nil
}
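`regexp.Delayed` from containers/storage defers compilation to first use, so packages full of rarely-exercised patterns pay nothing at init time. A minimal sketch of the same idea built on `sync.Once` (not the vendored implementation):

```go
package main

import (
	"fmt"
	"regexp"
	"sync"
)

// lazyRegexp shows the idea behind containers/storage's regexp.Delayed:
// keep the pattern as a string and compile it on first use.
type lazyRegexp struct {
	pattern string
	once    sync.Once
	re      *regexp.Regexp
}

func delayed(pattern string) *lazyRegexp {
	return &lazyRegexp{pattern: pattern}
}

func (l *lazyRegexp) get() *regexp.Regexp {
	l.once.Do(func() {
		// MustCompile is safe by the same contract as the original:
		// the pattern is a compile-time constant known to be valid.
		l.re = regexp.MustCompile(l.pattern)
	})
	return l.re
}

func (l *lazyRegexp) FindStringIndex(s string) []int {
	return l.get().FindStringIndex(s)
}

var scopeRegexp = delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")

func main() {
	fmt.Println(scopeRegexp.FindStringIndex("host/ns/stream:tag") != nil) // true
}
```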
@@ -59,11 +59,11 @@ type openshiftReference struct {
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
+ return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
+ return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
}
return NewReference(tagged)
}
@@ -72,7 +72,7 @@ func ParseReference(ref string) (types.ImageReference, error) {
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(reference.Path(dockerRef), "/", 3)
if len(r) != 2 {
- return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
+ return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
reference.FamiliarString(dockerRef))
}
return openshiftReference{
@@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return reference.FamiliarString(ref.dockerReference)
@@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return genericImage.FromSource(ctx, sys, src)
+ return genericImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -153,5 +149,5 @@ func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *type
// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for atomic: images")
+ return errors.New("Deleting images not implemented for atomic: images")
}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index c91a49c57..d4ebe413b 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
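Go 1.17 introduced the `//go:build` syntax; gofmt emits and keeps both lines in sync while older toolchains remain supported, which is why the hunk adds the new line without removing the legacy one. For example:

```go
//go:build containers_image_ostree
// +build containers_image_ostree

// This file compiles only with `-tags containers_image_ostree`. The new
// //go:build form uses real boolean operators (&&, ||, !); the legacy
// // +build form encodes OR as spaces and AND as commas.
package ostree
```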
@@ -7,9 +8,10 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"os/exec"
"path/filepath"
@@ -20,14 +22,19 @@ import (
"time"
"unsafe"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -64,6 +71,11 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
ref ostreeReference
manifest string
schema manifestSchema
@@ -75,12 +87,33 @@ type ostreeImageDestination struct {
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
-func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+ d := &ostreeImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ manifest: "",
+ schema: manifestSchema{},
+ tmpDirPath: tmpDirPath,
+ blobs: map[string]*blobToImport{},
+ digest: "",
+ signaturesLen: 0,
+ repo: nil,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -97,103 +130,64 @@ func (d *ostreeImageDestination) Close() error {
return os.RemoveAll(d.tmpDirPath)
}
-func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.PreserveOriginal
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
defer blobFile.Close()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
- return types.BlobInfo{}, err
+ return private.UploadedBlob{}, err
}
- hash := computedDigest.Hex()
- d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ hash := blobDigest.Encoded()
+ d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
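The replaced TeeReader lines above hashed the stream while copying it; the new putblobdigest helper packages the same idea and, as its name suggests, skips the work when a canonical digest is already known. A runnable sketch of the underlying pattern, using only the public go-digest API:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hash a stream while copying it, the way the pre-refactor PutBlob did
	// with digest.Canonical.Digester() and io.TeeReader.
	stream := strings.NewReader("layer contents")
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	size, err := io.Copy(io.Discard, tee) // io.Discard stands in for the blob file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("size=%d digest=%s\n", size, digester.Digest())
}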
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
- entries, err := ioutil.ReadDir(dir)
+ entries, err := os.ReadDir(dir)
if err != nil {
return err
}
- for _, info := range entries {
- fullpath := filepath.Join(dir, info.Name())
- if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+ for _, entry := range entries {
+ fullpath := filepath.Join(dir, entry.Name())
+ if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if err := os.Remove(fullpath); err != nil {
return err
}
continue
}
+ info, err := entry.Info()
+ if err != nil {
+ return err
+ }
if selinuxHnd != nil {
relPath, err := filepath.Rel(root, fullpath)
if err != nil {
@@ -210,7 +204,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
- return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
+ return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
}
if int(res) == 0 {
defer C.freecon(context)
@@ -218,12 +212,12 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
- return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
+ return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
}
}
}
- if info.IsDir() {
+ if entry.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
@@ -233,7 +227,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
if err != nil {
return err
}
- } else if usermode && (info.Mode().IsRegular()) {
+ } else if usermode && (entry.Type().IsRegular()) {
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
return err
}
@@ -288,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
- destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
+ destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
@@ -329,52 +323,57 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
}
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ return false, private.ReusedBlob{}, nil
+ }
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
d.repo = repo
}
- branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return false, private.ReusedBlob{}, err
+ }
+ branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
- return found, types.BlobInfo{}, err
+ return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
- return found, types.BlobInfo{}, err
+ return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
- return found, types.BlobInfo{}, err
+ return found, private.ReusedBlob{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
- return false, types.BlobInfo{}, err
+ return false, private.ReusedBlob{}, err
}
- return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
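Several hunks in this file validate a digest before calling Encoded(), since digest.Digest is a plain string type and Encoded() panics on malformed values, as the inline comments note. A small sketch of that guard, assuming only the public go-digest API:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Validate() rejects malformed values up front, so Encoded() is only
	// ever called on a well-formed "algorithm:hex" digest.
	for _, raw := range []string{
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"not-a-digest",
	} {
		d := digest.Digest(raw)
		if err := d.Validate(); err != nil {
			fmt.Println("rejected:", err)
			continue
		}
		fmt.Println("encoded part:", d.Encoded())
	}
}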
// PutManifest writes manifest to the destination.
@@ -405,13 +404,14 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
}
d.digest = digest
- return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+ return os.WriteFile(manifestPath, manifestBlob, 0644)
}
-// PutSignatures writes signatures to the destination.
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests.
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -423,7 +423,11 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
- if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
return err
}
}
@@ -431,7 +435,11 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
return nil
}
-func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ostreeImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@@ -450,7 +458,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
- return errors.Wrapf(err, "cannot open the SELinux DB")
+ return fmt.Errorf("cannot open the SELinux DB: %w", err)
}
defer C.selabel_close(selinuxHnd)
@@ -472,13 +480,19 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
return nil
}
for _, layer := range d.schema.LayersDescriptors {
- hash := layer.Digest.Hex()
+ if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return err
+ }
+ hash := layer.Digest.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
}
for _, layer := range d.schema.FSLayers {
- hash := layer.BlobSum.Hex()
+ if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return err
+ }
+ hash := layer.BlobSum.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
@@ -506,7 +520,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
}
func ensureDirectoryExists(path string) error {
- if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
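The ensureDirectoryExists change above swaps os.IsNotExist for errors.Is(err, fs.ErrNotExist); only the latter follows fmt.Errorf %w wrapping chains. A short sketch of the difference:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/definitely/not/here")
	wrapped := fmt.Errorf("checking path: %w", err)
	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap fmt.Errorf chains
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: unwraps %w chains
}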
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index 4948ec664..0b597ce26 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
@@ -6,20 +7,23 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"strconv"
"strings"
"unsafe"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -34,6 +38,10 @@ import (
import "C"
type ostreeImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
@@ -42,8 +50,19 @@ type ostreeImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing ostree repository.
-func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
- return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
+func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
+ s := &ostreeImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ tmpDir: tmpDir,
+ compressed: nil,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -132,9 +151,9 @@ func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
- pathc := C.g_file_new_for_path(cpath)
- defer C.g_object_unref(C.gpointer(pathc))
- repo := C.ostree_repo_new(pathc)
+ file := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(file))
+ repo := C.ostree_repo_new(file)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))
@@ -171,7 +190,7 @@ func (o ostreeReader) Read(p []byte) (int, error) {
if count == 0 {
return 0, io.EOF
}
- data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
+ data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count)
copy(p, data)
return count, nil
}
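The Read change above replaces the old giant-array cast with unsafe.Slice (Go 1.17+), which builds a slice header over a pointer and a length. A Go-only sketch, with a local array standing in for the C g_bytes_get_data() buffer:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// unsafe.Slice(ptr, n) yields a []byte view over n bytes starting at ptr,
	// replacing the (*[1 << 30]byte)(ptr)[:count:count] idiom used before.
	backing := [8]byte{'o', 's', 't', 'r', 'e', 'e', '!', '!'}
	view := unsafe.Slice(&backing[0], 6)
	fmt.Printf("%s\n", view) // "ostree"
}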
@@ -263,17 +282,14 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-
- blob := info.Digest.Hex()
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return nil, -1, err
+ }
+ blob := info.Digest.Encoded()
// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
if s.compressed == nil {
@@ -285,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
- blob = compressedBlob.Hex()
+ blob = compressedBlob.Encoded()
}
branch := fmt.Sprintf("ociimage/%s", blob)
@@ -339,10 +355,11 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return rc, layerSize, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -360,18 +377,23 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
s.repo = repo
}
- signatures := [][]byte{}
+ signatures := []signature.Signature{}
for i := int64(1); i <= lenSignatures; i++ {
- sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+ path := fmt.Sprintf("/signature-%d", i)
+ sigReader, err := s.readSingleFile(branch, path)
if err != nil {
return nil, err
}
defer sigReader.Close()
- sig, err := ioutil.ReadAll(sigReader)
+ sigBlob, err := io.ReadAll(sigReader)
if err != nil {
return nil, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, sig)
}
return signatures, nil
@@ -402,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
- branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+ branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
@@ -417,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
if err != nil {
return nil, err
}
- uncompressedDigest := digest.Digest(uncompressedDigestStr)
+ uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
+ if err != nil {
+ return nil, err
+ }
blobInfo := types.BlobInfo{
Digest: uncompressedDigest,
Size: uncompressedSize,
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index a55147b85..d83f85b90 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
@@ -5,18 +6,18 @@ package ostree
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
- "regexp"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/regexp"
)
const defaultOSTreeRepo = "/ostree/repo"
@@ -41,16 +42,16 @@ func init() {
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
- return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
- return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
- return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
@@ -74,12 +75,11 @@ type ostreeImageCloser struct {
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
- var image = ""
- s := strings.SplitN(ref, "@/", 2)
- if len(s) == 1 {
- image, repo = s[0], defaultOSTreeRepo
+ image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
+ if !gotRepoPart {
+ repo = defaultOSTreeRepo
} else {
- image, repo = s[0], "/"+s[1]
+ repo = "/" + repoPart
}
return NewReference(image, repo)
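The rewrite above uses strings.Cut (Go 1.18+), whose boolean result replaces the SplitN length check. A runnable sketch of the same "image[@/repo]" parsing; the literal default path is a stand-in for defaultOSTreeRepo:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, ref := range []string{"fedora:latest@/var/lib/ostree", "fedora:latest"} {
		// gotRepoPart says whether "@/" was present at all, so no
		// SplitN length check is needed.
		image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
		repo := "/ostree/repo" // stand-in for defaultOSTreeRepo
		if gotRepoPart {
			repo = "/" + repoPart
		}
		fmt.Printf("image=%q repo=%q\n", image, repo)
	}
}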
@@ -116,7 +116,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+ return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
@@ -133,7 +133,7 @@ func (ref ostreeReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
@@ -156,11 +156,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
- s := strings.SplitN(ref.image, ":", 2)
- if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+ repo, _, gotTag := strings.Cut(ref.image, ":")
+ if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
- name := s[0]
+ name := repo
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
@@ -183,17 +183,7 @@ func (s *ostreeImageCloser) Size() (int64, error) {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- src, err := newImageSource(tmpDir, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -222,10 +212,10 @@ func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.S
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for ostree: images")
+ return errors.New("Deleting images not implemented for ostree: images")
}
-var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
func encodeOStreeRef(in string) string {
var buffer bytes.Buffer
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
deleted file mode 100644
index a472efd95..000000000
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Package boltdb implements a BlobInfoCache backed by BoltDB.
-package boltdb
-
-import (
- "fmt"
- "os"
- "sync"
- "time"
-
- "github.com/containers/image/v5/internal/blobinfocache"
- "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- "github.com/sirupsen/logrus"
- bolt "go.etcd.io/bbolt"
-)
-
-var (
- // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
- // we can simply start over with a different filename; update blobInfoCacheFilename.
-
- // FIXME: For CRI-O, does this need to hide information between different users?
-
- // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
- uncompressedDigestBucket = []byte("uncompressedDigest")
- // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
- // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
- digestCompressorBucket = []byte("digestCompressor")
- // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
- // (as a set of key=digest, value="" pairs)
- digestByUncompressedBucket = []byte("digestByUncompressed")
- // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
- // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
- knownLocationsBucket = []byte("knownLocations")
-)
-
-// Concurrency:
-// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
-// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
-
-// pathLock contains a lock for a specific BoltDB database path.
-type pathLock struct {
- refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
- mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
-}
-
-var (
- // pathLocks contains a lock for each currently open file.
- // This must be global so that independently created instances of boltDBCache exclude each other.
- // The map is protected by pathLocksMutex.
- // FIXME? Should this be based on device:inode numbers rather than paths?
- pathLocks = map[string]*pathLock{}
- pathLocksMutex = sync.Mutex{}
-)
-
-// lockPath obtains the pathLock for path.
-// The caller must call unlockPath eventually.
-func lockPath(path string) {
- pl := func() *pathLock { // A scope for defer
- pathLocksMutex.Lock()
- defer pathLocksMutex.Unlock()
- pl, ok := pathLocks[path]
- if ok {
- pl.refCount++
- } else {
- pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
- pathLocks[path] = pl
- }
- return pl
- }()
- pl.mutex.Lock()
-}
-
-// unlockPath releases the pathLock for path.
-func unlockPath(path string) {
- pathLocksMutex.Lock()
- defer pathLocksMutex.Unlock()
- pl, ok := pathLocks[path]
- if !ok {
- // Should this return an error instead? BlobInfoCache ultimately ignores errors…
- panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
- }
- pl.mutex.Unlock()
- pl.refCount--
- if pl.refCount == 0 {
- delete(pathLocks, path)
- }
-}
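The comments above describe the removed boltdb package's refcounted per-path lock map. A self-contained, simplified sketch of that idea (not the removed implementation itself):

package main

import (
	"fmt"
	"sync"
)

// pathLock mirrors the removed helper: refCount is guarded by the global
// locksMu, while mutex serializes access to one database path.
type pathLock struct {
	refCount int64
	mutex    sync.Mutex
}

var (
	locks   = map[string]*pathLock{} // one entry per currently locked path
	locksMu sync.Mutex
)

func lockPath(path string) {
	locksMu.Lock()
	pl, ok := locks[path]
	if !ok {
		pl = &pathLock{}
		locks[path] = pl
	}
	pl.refCount++
	locksMu.Unlock()
	pl.mutex.Lock() // may block until the current holder calls unlockPath
}

func unlockPath(path string) {
	locksMu.Lock()
	defer locksMu.Unlock()
	pl := locks[path]
	pl.mutex.Unlock()
	pl.refCount--
	if pl.refCount == 0 {
		delete(locks, path) // last user is gone; keep the map from growing
	}
}

func main() {
	lockPath("/tmp/cache.db")
	fmt.Println("exclusive access to /tmp/cache.db")
	unlockPath("/tmp/cache.db")
}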
-
-// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
-//
-// Note that we don’t keep the database open across operations, because that would lock the file and block any other
-// users; instead, we need to open/close it for every single write or lookup.
-type cache struct {
- path string
-}
-
-// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
-//
-// Most users should call blobinfocache.DefaultCache instead.
-func New(path string) types.BlobInfoCache {
- return new2(path)
-}
-func new2(path string) *cache {
- return &cache{path: path}
-}
-
-// view runs the specified fn within a read-only transaction on the database.
-func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
- // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
- // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
- // a read lock, blocking any future writes.
- // Hence this preliminary check, which is RACY: Another process could remove the file
- // between the Lstat call and opening the database.
- if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
- return err
- }
-
- lockPath(bdc.path)
- defer unlockPath(bdc.path)
- db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
- if err != nil {
- return err
- }
- defer func() {
- if err := db.Close(); retErr == nil && err != nil {
- retErr = err
- }
- }()
-
- return db.View(fn)
-}
-
-// update runs the specified fn within a read-write transaction on the database.
-func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
- lockPath(bdc.path)
- defer unlockPath(bdc.path)
- db, err := bolt.Open(bdc.path, 0600, nil)
- if err != nil {
- return err
- }
- defer func() {
- if err := db.Close(); retErr == nil && err != nil {
- retErr = err
- }
- }()
-
- return db.Update(fn)
-}
-
-// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
-func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
- if b := tx.Bucket(uncompressedDigestBucket); b != nil {
- if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
- d, err := digest.Parse(string(uncompressedBytes))
- if err == nil {
- return d
- }
- // FIXME? Log err (but throttle the log volume on repeated accesses)?
- }
- }
- // Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
- // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
- // when we already record a (compressed, uncompressed) pair.
- if b := tx.Bucket(digestByUncompressedBucket); b != nil {
- if b = b.Bucket([]byte(anyDigest.String())); b != nil {
- c := b.Cursor()
- if k, _ := c.First(); k != nil { // The bucket is non-empty
- return anyDigest
- }
- }
- }
- return ""
-}
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
- var res digest.Digest
- if err := bdc.view(func(tx *bolt.Tx) error {
- res = bdc.uncompressedDigest(tx, anyDigest)
- return nil
- }); err != nil { // Including os.IsNotExist(err)
- return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
- }
- return res
-}
-
-// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-// It’s allowed for anyDigest == uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
- _ = bdc.update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
- if err != nil {
- return err
- }
- key := []byte(anyDigest.String())
- if previousBytes := b.Get(key); previousBytes != nil {
- previous, err := digest.Parse(string(previousBytes))
- if err != nil {
- return err
- }
- if previous != uncompressed {
- logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
- }
- }
- if err := b.Put(key, []byte(uncompressed.String())); err != nil {
- return err
- }
-
- b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
- if err != nil {
- return err
- }
- b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
- if err != nil {
- return err
- }
- if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
- return err
- }
- return nil
- }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-}
-
-// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
-// compressor, or is blobinfocache.Uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
- _ = bdc.update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
- if err != nil {
- return err
- }
- key := []byte(anyDigest.String())
- if previousBytes := b.Get(key); previousBytes != nil {
- if string(previousBytes) != compressorName {
- logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
- }
- }
- if compressorName == blobinfocache.UnknownCompression {
- return b.Delete(key)
- }
- return b.Put(key, []byte(compressorName))
- }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-}
-
-// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-// and can be reused given the opaque location data.
-func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
- _ = bdc.update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
- if err != nil {
- return err
- }
- b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
- if err != nil {
- return err
- }
- b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
- if err != nil {
- return err
- }
- b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
- if err != nil {
- return err
- }
- value, err := time.Now().MarshalBinary()
- if err != nil {
- return err
- }
- if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
- return err
- }
- return nil
- }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-}
-
-// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
- digestKey := []byte(digest.String())
- b := scopeBucket.Bucket(digestKey)
- if b == nil {
- return candidates
- }
- compressorName := blobinfocache.UnknownCompression
- if compressionBucket != nil {
- // the bucket won't exist if the cache was created by a v1 implementation and
- // hasn't yet been updated by a v2 implementation
- if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
- compressorName = string(compressorNameValue)
- }
- }
- if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
- return candidates
- }
- _ = b.ForEach(func(k, v []byte) error {
- t := time.Time{}
- if err := t.UnmarshalBinary(v); err != nil {
- return err
- }
- candidates = append(candidates, prioritize.CandidateWithTime{
- Candidate: blobinfocache.BICReplacementCandidate2{
- Digest: digest,
- CompressorName: compressorName,
- Location: types.BICLocationReference{Opaque: string(k)},
- },
- LastSeen: t,
- })
- return nil
- }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
- return candidates
-}
-
-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
- return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
-}
-
-func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
- res := []prioritize.CandidateWithTime{}
- var uncompressedDigestValue digest.Digest // = ""
- if err := bdc.view(func(tx *bolt.Tx) error {
- scopeBucket := tx.Bucket(knownLocationsBucket)
- if scopeBucket == nil {
- return nil
- }
- scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
- if scopeBucket == nil {
- return nil
- }
- scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
- if scopeBucket == nil {
- return nil
- }
- // compressionBucket won't have been created if previous writers never recorded info about compression,
- // and we don't want to fail just because of that
- compressionBucket := tx.Bucket(digestCompressorBucket)
-
- res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
- if canSubstitute {
- if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
- b := tx.Bucket(digestByUncompressedBucket)
- if b != nil {
- b = b.Bucket([]byte(uncompressedDigestValue.String()))
- if b != nil {
- if err := b.ForEach(func(k, _ []byte) error {
- d, err := digest.Parse(string(k))
- if err != nil {
- return err
- }
- if d != primaryDigest && d != uncompressedDigestValue {
- res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
- }
- return nil
- }); err != nil {
- return err
- }
- }
- }
- if uncompressedDigestValue != primaryDigest {
- res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
- }
- }
- }
- return nil
- }); err != nil { // Including os.IsNotExist(err)
- return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
- }
-
- return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
-}
-
-// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
- return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
index 83034b618..b413ec513 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
@@ -6,8 +6,8 @@ import (
"path/filepath"
"github.com/containers/image/v5/internal/rootless"
- "github.com/containers/image/v5/pkg/blobinfocache/boltdb"
"github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/pkg/blobinfocache/sqlite"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
@@ -15,7 +15,7 @@ import (
const (
// blobInfoCacheFilename is the file name used for blob info caches.
// If the format changes in an incompatible way, increase the version number.
- blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+ blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
systemBlobInfoCacheDir = "/var/lib/containers/cache"
)
@@ -57,10 +57,32 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
}
path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil {
- logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
return memory.New()
}
- logrus.Debugf("Using blob info cache at %s", path)
- return boltdb.New(path)
+ // It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+ // as a global singleton, for the vast majority of callers who don’t override the cache location.
+ // OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+ // and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
+
+ cache, err := sqlite.New(path)
+ if err != nil {
+ logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+ return memory.New()
+ }
+ logrus.Debugf("Using SQLite blob info cache at %s", path)
+ return cache
+}
+
+// CleanupDefaultCache removes the blob info cache directory.
+// It deletes the cache directory but it does not affect any file or memory buffer currently
+// in use.
+func CleanupDefaultCache(sys *types.SystemContext) error {
+ dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+ if err != nil {
+ // Mirror the DefaultCache behavior that does not fail in this case
+ return nil
+ }
+ return os.RemoveAll(dir)
}
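A minimal usage sketch of the fallback behavior shown in this hunk: DefaultCache never returns an error, and degrades to a memory-only cache when the SQLite file cannot be set up.

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache"
)

func main() {
	// A nil SystemContext selects the default cache location; on any setup
	// problem DefaultCache logs at debug level and returns a memory cache.
	cache := blobinfocache.DefaultCache(nil)
	fmt.Printf("%T\n", cache) // concrete type depends on whether the SQLite file could be opened
}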
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
index 6f5506d94..d73aafbdb 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -1,110 +1,244 @@
-// Package prioritize provides utilities for prioritizing locations in
+// Package prioritize provides utilities for filtering and prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize
import (
- "sort"
+ "cmp"
+ "slices"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
)
-// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5
+// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
+// This is a heuristic/guess, and could well use a different value.
+const replacementUnknownLocationAttempts = 2
+
+// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
+// which can be later combined with information about a location.
+type CandidateTemplate struct {
+ digest digest.Digest
+ compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+ compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+ compressionAnnotations map[string]string // Annotations necessary to use compressionAlgorithm, if any
+}
+
+// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
+// for a CandidateLocations* call with v2Options.
+//
+// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
+// if not nil, the call is assumed to be CandidateLocations2.
+func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
+ if v2Options == nil {
+ return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
+ digest: digest,
+ }
+ }
+
+ requiredCompression := "nil"
+ if v2Options.RequiredCompression != nil {
+ requiredCompression = v2Options.RequiredCompression.Name()
+ }
+ switch data.BaseVariantCompressor {
+ case blobinfocache.Uncompressed:
+ if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+ PossibleManifestFormats: v2Options.PossibleManifestFormats,
+ RequiredCompression: v2Options.RequiredCompression,
+ }, nil) {
+ logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v",
+ digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+ return nil
+ }
+ return &CandidateTemplate{
+ digest: digest,
+ compressionOperation: types.Decompress,
+ compressionAlgorithm: nil,
+ compressionAnnotations: nil,
+ }
+ case blobinfocache.UnknownCompression:
+ logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
+ return nil // Not allowed with CandidateLocations2
+ default:
+ // See if we can use the specific variant, first.
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
+ if err != nil {
+ logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
+ data.SpecificVariantCompressor, digest.String(), err)
+ } else {
+ if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+ PossibleManifestFormats: v2Options.PossibleManifestFormats,
+ RequiredCompression: v2Options.RequiredCompression,
+ }, &algo) {
+ logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
+ data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+ } else {
+ return &CandidateTemplate{
+ digest: digest,
+ compressionOperation: types.Compress,
+ compressionAlgorithm: &algo,
+ compressionAnnotations: data.SpecificVariantAnnotations,
+ }
+ }
+ }
+ }
+
+ // Try the base variant.
+ algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
+ if err != nil {
+ logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
+ digest.String(), data.BaseVariantCompressor, err)
+ return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
+ }
+ if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+ PossibleManifestFormats: v2Options.PossibleManifestFormats,
+ RequiredCompression: v2Options.RequiredCompression,
+ }, &algo) {
+ logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
+ digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats)
+ return nil
+ }
+ return &CandidateTemplate{
+ digest: digest,
+ compressionOperation: types.Compress,
+ compressionAlgorithm: &algo,
+ compressionAnnotations: nil,
+ }
+ }
+}
+
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
- Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
- LastSeen time.Time // Time the candidate was last known to exist (either read or written)
+ candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
+ lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}
-// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
-// along with the specially-treated digest values for the implementation of sort.Interface.Less
-type candidateSortState struct {
- cs []CandidateWithTime // The entries to sort
- primaryDigest digest.Digest // The digest the user actually asked for
- uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen)
+func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
+ return CandidateWithTime{
+ candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: template.digest,
+ CompressionOperation: template.compressionOperation,
+ CompressionAlgorithm: template.compressionAlgorithm,
+ CompressionAnnotations: template.compressionAnnotations,
+ UnknownLocation: false,
+ Location: location,
+ },
+ lastSeen: lastSeen,
+ }
}
-func (css *candidateSortState) Len() int {
- return len(css.cs)
+// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location.
+func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
+ return CandidateWithTime{
+ candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: template.digest,
+ CompressionOperation: template.compressionOperation,
+ CompressionAlgorithm: template.compressionAlgorithm,
+ CompressionAnnotations: template.compressionAnnotations,
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
+ },
+ lastSeen: time.Time{},
+ }
}
-func (css *candidateSortState) Less(i, j int) bool {
- xi := css.cs[i]
- xj := css.cs[j]
+// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
+// along with the specially-treated digest values relevant to the ordering.
+type candidateSortState struct {
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
// primaryDigest entries come first, more recent first.
// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
// First, deal with the primaryDigest/uncompressedDigest cases:
- if xi.Candidate.Digest != xj.Candidate.Digest {
+ if xi.candidate.Digest != xj.candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
- if xi.Candidate.Digest == css.primaryDigest {
- return true
+ if xi.candidate.Digest == css.primaryDigest {
+ return -1
}
- if xj.Candidate.Digest == css.primaryDigest {
- return false
+ if xj.candidate.Digest == css.primaryDigest {
+ return 1
}
if css.uncompressedDigest != "" {
- if xi.Candidate.Digest == css.uncompressedDigest {
- return false
+ if xi.candidate.Digest == css.uncompressedDigest {
+ return 1
}
- if xj.Candidate.Digest == css.uncompressedDigest {
- return true
+ if xj.candidate.Digest == css.uncompressedDigest {
+ return -1
}
}
} else { // xi.candidate.Digest == xj.candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
- if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
- return xi.LastSeen.After(xj.LastSeen)
+ if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+ return -xi.lastSeen.Compare(xj.lastSeen)
}
}
// Neither of the digests are primaryDigest/uncompressedDigest:
- if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
- return xi.LastSeen.After(xj.LastSeen)
+ if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time
+ return -cmp
}
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
- return xi.Candidate.Digest < xj.Candidate.Digest
+ return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest)
}
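compare follows the slices.SortFunc convention used below: a negative result sorts the first argument earlier. A tiny self-contained example of the same newest-first ordering with a digest tie-break:

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

type entry struct {
	digest string
	seen   time.Time
}

func main() {
	now := time.Now()
	es := []entry{
		{"sha256:bbb", now.Add(-time.Hour)},
		{"sha256:aaa", now},
		{"sha256:ccc", now.Add(-time.Hour)},
	}
	slices.SortFunc(es, func(a, b entry) int {
		if c := a.seen.Compare(b.seen); c != 0 {
			return -c // newer entries sort first
		}
		return cmp.Compare(a.digest, b.digest) // deterministic tie-break
	})
	for _, e := range es {
		fmt.Println(e.digest) // aaa, then bbb and ccc
	}
}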
-func (css *candidateSortState) Swap(i, j int) {
- css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
-}
-
-// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
-// number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with separate limits on the
+// number of entries with known and unknown locations, exposed only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
+ // Split candidates with a known location from those without one,
+ // and limit the two groups separately.
+ var knownLocationCandidates []CandidateWithTime
+ var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
- sort.Sort(&candidateSortState{
- cs: cs,
+ slices.SortFunc(cs, (&candidateSortState{
primaryDigest: primaryDigest,
uncompressedDigest: uncompressedDigest,
- })
+ }).compare)
+ for _, candidate := range cs {
+ if candidate.candidate.UnknownLocation {
+ unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+ } else {
+ knownLocationCandidates = append(knownLocationCandidates, candidate)
+ }
+ }
- resLength := len(cs)
- if resLength > maxCandidates {
- resLength = maxCandidates
+ knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+ remainingCapacity := totalLimit - knownLocationCandidatesUsed
+ unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
+ res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+ for i := 0; i < knownLocationCandidatesUsed; i++ {
+ res[i] = knownLocationCandidates[i].candidate
}
- res := make([]blobinfocache.BICReplacementCandidate2, resLength)
- for i := range res {
- res[i] = cs[i].Candidate
+ // If any candidates with an unknown location were found, append them to the final list.
+ for i := 0; i < unknownLocationCandidatesUsed; i++ {
+ res = append(res, unknownLocationCandidates[i].candidate)
}
return res
}
// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
-// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
-// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
- return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
}
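
For readers less familiar with the Go 1.21 sorting idiom adopted above, here is a standalone sketch of the same sort-then-split-and-limit pattern. The candidate type, digests, and limits are simplified stand-ins for illustration, not the real prioritize internals:

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

// candidate is a simplified stand-in for the real CandidateWithTime.
type candidate struct {
	digest          string
	lastSeen        time.Time
	unknownLocation bool
}

func main() {
	now := time.Now()
	primary := "sha256:primary"
	cs := []candidate{
		{"sha256:bbb", now.Add(-2 * time.Minute), false},
		{primary, now.Add(-5 * time.Minute), false},
		{"sha256:aaa", now.Add(-1 * time.Minute), true},
	}

	// Three-way comparator in the style of candidateSortState.compare:
	// the primary digest wins, then more recent entries, then the digest
	// string as a deterministic tie-breaker.
	slices.SortFunc(cs, func(a, b candidate) int {
		if a.digest != b.digest {
			if a.digest == primary {
				return -1
			}
			if b.digest == primary {
				return 1
			}
		}
		if c := a.lastSeen.Compare(b.lastSeen); c != 0 {
			return -c // more recent first
		}
		return cmp.Compare(a.digest, b.digest)
	})

	// Split by location knowledge and apply the two limits separately,
	// as destructivelyPrioritizeReplacementCandidatesWithMax does.
	const totalLimit, noLocationLimit = 2, 1
	var known, unknown []candidate
	for _, c := range cs {
		if c.unknownLocation {
			unknown = append(unknown, c)
		} else {
			known = append(known, c)
		}
	}
	res := known[:min(len(known), totalLimit)]
	res = append(res, unknown[:min(noLocationLimit, totalLimit-len(res), len(unknown))]...)
	fmt.Println(res)
}
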
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
index 426640366..9d4125d66 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@@ -19,14 +20,15 @@ type locationKey struct {
blobDigest digest.Digest
}
-// cache implements an in-memory-only BlobInfoCache
+// cache implements an in-memory-only BlobInfoCache.
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
- uncompressedDigests map[digest.Digest]digest.Digest
- digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
- knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
- compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
+ uncompressedDigests map[digest.Digest]digest.Digest
+ uncompressedDigestsByTOC map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+ compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
}
// New returns a BlobInfoCache implementation which is in-memory only.
@@ -43,13 +45,23 @@ func New() types.BlobInfoCache {
func new2() *cache {
return &cache{
- uncompressedDigests: map[digest.Digest]digest.Digest{},
- digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
- knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
- compressors: map[digest.Digest]string{},
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ compressors: map[digest.Digest]blobinfocache.DigestCompressorData{},
}
}
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
@@ -67,7 +79,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
- if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+ if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
return anyDigest
}
return ""
@@ -88,10 +100,34 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
if !ok {
- anyDigestSet = map[digest.Digest]struct{}{}
+ anyDigestSet = set.New[digest.Digest]()
mem.digestsByUncompressed[uncompressed] = anyDigestSet
}
- anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+ anyDigestSet.Add(anyDigest)
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
+ return d
+ }
+ return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
@@ -108,37 +144,68 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
locationScope[location] = time.Now() // Possibly overwriting an older entry.
}
-// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
-// algorithm, or uncompressed, or that we no longer know.
-func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+// - don’t record a compressor for a digest just because some remote author claims so
+// (e.g. because a manifest says so);
+// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+// in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
- if compressorName == blobinfocache.UnknownCompression {
- delete(mem.compressors, blobDigest)
+ if previous, ok := mem.compressors[anyDigest]; ok {
+ if previous.BaseVariantCompressor != data.BaseVariantCompressor {
+ logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
+ } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
+ previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
+ logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
+ }
+ // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic.
+
+ // Preserve specific variant information if the incoming data does not have it.
+ if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
+ previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ data.SpecificVariantCompressor = previous.SpecificVariantCompressor
+ data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
+ }
+ }
+ if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+ delete(mem.compressors, anyDigest)
return
}
- mem.compressors[blobDigest] = compressorName
+ mem.compressors[anyDigest] = data
}
-// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
+// with corresponding compression info from mem.compressors, and returns the result of appending
+// them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
+// with unknown compression.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+ v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
+ compressionData := blobinfocache.DigestCompressorData{
+ BaseVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ }
+ if v, ok := mem.compressors[digest]; ok {
+ compressionData = v
+ }
+ template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+ if template == nil {
+ return candidates
+ }
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
- for l, t := range locations {
- compressorName, compressorKnown := mem.compressors[digest]
- if !compressorKnown {
- if requireCompressionInfo {
- continue
- }
- compressorName = blobinfocache.UnknownCompression
+ if len(locations) > 0 {
+ for l, t := range locations {
+ candidates = append(candidates, template.CandidateWithLocation(l, t))
}
- candidates = append(candidates, prioritize.CandidateWithTime{
- Candidate: blobinfocache.BICReplacementCandidate2{
- Digest: digest,
- CompressorName: compressorName,
- Location: l,
- },
- LastSeen: t,
- })
+ } else if v2Options != nil {
+ candidates = append(candidates, template.CandidateWithUnknownLocation())
}
return candidates
}
@@ -150,35 +217,37 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
- return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+ return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
}
-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
- return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport, scope) (if they still
+// exist, which is not guaranteed).
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+ return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
}
-func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+ v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
- res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
- for d := range otherDigests {
- if d != primaryDigest && d != uncompressedDigest {
- res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
+ if otherDigests != nil {
+ for _, d := range otherDigests.Values() {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
+ }
}
}
if uncompressedDigest != primaryDigest {
- res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
}
}
}
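
The memory implementation above builds one candidate template per digest and then stamps out a candidate per known location, falling back to a single location-less candidate for v2 callers. A standalone sketch of that pattern, using hypothetical stand-in types rather than the c/image internals:

package main

import (
	"fmt"
	"time"
)

// template and candidate are hypothetical stand-ins for the internal
// prioritize.CandidateTemplate and CandidateWithTime types.
type template struct{ digest string }

type candidate struct {
	digest          string
	location        string // "" if unknown
	unknownLocation bool
	lastSeen        time.Time
}

func (t *template) withLocation(loc string, seen time.Time) candidate {
	return candidate{digest: t.digest, location: loc, lastSeen: seen}
}

func (t *template) withUnknownLocation() candidate {
	return candidate{digest: t.digest, unknownLocation: true}
}

// appendCandidates mirrors the shape of cache.appendReplacementCandidates:
// one candidate per known location, or a single location-less candidate
// when the caller opted in (v2Options != nil) and no location is recorded.
func appendCandidates(out []candidate, t *template, locations map[string]time.Time, v2 bool) []candidate {
	if len(locations) > 0 {
		for loc, seen := range locations {
			out = append(out, t.withLocation(loc, seen))
		}
	} else if v2 {
		out = append(out, t.withUnknownLocation())
	}
	return out
}

func main() {
	t := &template{digest: "sha256:example"}
	known := map[string]time.Time{"layer-path": time.Now()}
	fmt.Println(appendCandidates(nil, t, known, true))
	fmt.Println(appendCandidates(nil, t, nil, true)) // v2 fallback: unknown location
}
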
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
index 4b7122f92..9a2219e79 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
@@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 000000000..1a7931023
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,671 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+ _ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend backend for database/sql
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+ // That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+ // (which would require compatibility in both directions).
+
+ // Assembled sqlite options used when opening the database.
+ sqliteOptions = "?" +
+ // Deal with timezone automatically.
+ // go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
+ // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
+ // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
+ // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
+ "_loc=auto" +
+ // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
+ "&_sync=FULL" +
+ // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
+ // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
+ "&_foreign_keys=1" +
+ // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
+ // i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
+ // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
+ //
+ // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error,
+ // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
+ // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
+ // Compare https://github.com/mattn/go-sqlite3/issues/274 .
+ //
+ // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
+ // or https://github.com/mattn/go-sqlite3/issues/400 .
+ // The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
+ // which seems rather wasteful.
+ "&_txlock=exclusive"
+)
+
+// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
+type cache struct {
+ path string
+
+ // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
+ // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
+ // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
+ // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
+ // a Close method, so creating a lot of single-use caches could leak data.
+ //
+ // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
+ // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
+ // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
+ // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
+ // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
+ // somewhere around "700").
+
+ lock sync.Mutex
+ // The following fields can only be accessed with lock held.
+ refCount int // number of outstanding Open() calls
+ db *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
+}
+
+// New returns a BlobInfoCache implementation which uses a SQLite file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) (types.BlobInfoCache, error) {
+ return new2(path)
+}
+
+func new2(path string) (*cache, error) {
+ db, err := rawOpen(path)
+ if err != nil {
+ return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
+ }
+ defer db.Close()
+
+ // We don’t check the schema before every operation, because that would be costly
+ // and because we assume schema changes will be handled by using a different path.
+ if err := ensureDBHasCurrentSchema(db); err != nil {
+ return nil, err
+ }
+
+ return &cache{
+ path: path,
+ refCount: 0,
+ db: nil,
+ }, nil
+}
+
+// rawOpen returns a new *sql.DB for path.
+// The caller should arrange for it to be .Close()d.
+func rawOpen(path string) (*sql.DB, error) {
+ // This exists to centralize the use of sqliteOptions.
+ return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ if sqc.refCount == 0 {
+ db, err := rawOpen(sqc.path)
+ if err != nil {
+ logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+ db = nil // But still increase sqc.refCount, because a .Close() will happen
+ }
+ sqc.db = db
+ }
+ sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ switch sqc.refCount {
+ case 0:
+ logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+ return
+ case 1:
+ if sqc.db != nil {
+ sqc.db.Close()
+ sqc.db = nil
+ }
+ }
+ sqc.refCount--
+}
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
+ db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ if sqc.db != nil {
+ return sqc.db, func() {}, nil
+ }
+ db, err := rawOpen(sqc.path)
+ if err != nil {
+ return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+ }
+ return db, func() { db.Close() }, nil
+ }()
+ if err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
+ }
+ defer closeDB()
+
+ return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+ // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
+
+ var zeroRes T // A zero value of T
+
+ tx, err := db.Begin()
+ if err != nil {
+ return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ if err := tx.Rollback(); err != nil {
+ logrus.Errorf("Rolling back transaction: %v", err)
+ }
+ }
+ }()
+
+ res, err := fn(tx)
+ if err != nil {
+ return zeroRes, err
+ }
+ if err := tx.Commit(); err != nil {
+ return zeroRes, fmt.Errorf("committing transaction: %w", err)
+ }
+
+ succeeded = true
+ return res, nil
+}
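
As a usage sketch (not part of the upstream file): the generic dbTransaction helper above can be called as follows; the Users table is a hypothetical example, not part of the cache schema.

// countRows is an illustrative caller of the generic dbTransaction helper
// defined above; the Users table is hypothetical.
func countRows(db *sql.DB) (int, error) {
	return dbTransaction(db, func(tx *sql.Tx) (int, error) {
		var n int
		if err := tx.QueryRow("SELECT COUNT(*) FROM Users").Scan(&n); err != nil {
			return 0, err
		}
		return n, nil
	})
}
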
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+ var value T
+ if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+ var zeroValue T // A zero value of T
+ if errors.Is(err, sql.ErrNoRows) {
+ return zeroValue, false, nil
+ }
+ return zeroValue, false, err
+ }
+ return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+ // Considered schema design alternatives:
+ //
+ // (Overall, considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+ // to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
+ //
+ // * This schema uses the text representation of digests.
+ //
+ // We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+ // and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+ // is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+ // into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+ // — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+ //
+ // * There are two separate items for recording the uncompressed digest and digest compressors.
+ // Alternatively, we could have a single "digest facts" table with NULLable columns.
+ //
+ // The way the BlobInfoCache API works, we are only going to write one value at a time, so
+ // sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
+ // Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
+ // do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
+ // we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
+ //
+ // Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
+ // do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
+ //
+ // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+ //
+ // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+ // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+ // is probably costlier than comparing a few more bytes of data.
+ //
+ // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+ // having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+ //
+ items := []struct{ itemName, command string }{
+ {
+ "DigestUncompressedPairs",
+ `CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+ // index implied by PRIMARY KEY
+ `anyDigest TEXT PRIMARY KEY NOT NULL,` +
+ // DigestUncompressedPairs_index_uncompressedDigest
+ `uncompressedDigest TEXT NOT NULL
+ )`,
+ },
+ {
+ "DigestUncompressedPairs_index_uncompressedDigest",
+ `CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+ },
+ {
+ "DigestCompressors",
+ `CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+ // index implied by PRIMARY KEY
+ `digest TEXT PRIMARY KEY NOT NULL,` +
+ // May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+ `compressor TEXT NOT NULL
+ )`,
+ },
+ {
+ "KnownLocations",
+ `CREATE TABLE IF NOT EXISTS KnownLocations(
+ transport TEXT NOT NULL,
+ scope TEXT NOT NULL,
+ digest TEXT NOT NULL,
+ location TEXT NOT NULL,` +
+ // TIMESTAMP is parsed by SQLite with NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+ // format "2006-01-02 15:04:05.999999999-07:00".
+ // See also the _loc option in the sql.Open data source name.
+ `time TIMESTAMP NOT NULL,` +
+ // Implies an index.
+ // We also search by (transport, scope, digest), that doesn’t need an extra index
+ // because it is a prefix of the implied primary-key index.
+ `PRIMARY KEY (transport, scope, digest, location)
+ )`,
+ },
+ {
+ "DigestTOCUncompressedPairs",
+ `CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
+ // index implied by PRIMARY KEY
+ `tocDigest TEXT PRIMARY KEY NOT NULL,` +
+ `uncompressedDigest TEXT NOT NULL
+ )`,
+ },
+ {
+ "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+ `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+ // index implied by PRIMARY KEY
+ `digest TEXT PRIMARY KEY NOT NULL,` +
+ // The compressor is not `UnknownCompression`.
+ `specificVariantCompressor TEXT NOT NULL,
+ specificVariantAnnotations BLOB NOT NULL
+ )`,
+ },
+ }
+
+ _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+ // If the last-created item exists, assume nothing needs to be done.
+ lastItemName := items[len(items)-1].itemName
+ _, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+ if err != nil {
+ return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+ }
+ if !found {
+ // Item does not exist, assuming a fresh database.
+ for _, i := range items {
+ if _, err := tx.Exec(i.command); err != nil {
+ return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+ }
+ }
+ }
+ return void{}, nil
+ })
+ return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+ uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+ if err != nil {
+ return "", err
+ }
+ if found {
+ d, err := digest.Parse(uncompressedString)
+ if err != nil {
+ return "", err
+ }
+ return d, nil
+ }
+ // A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ _, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+ if err != nil {
+ return "", err
+ }
+ if found {
+ return anyDigest, nil
+ }
+ return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+ return sqc.uncompressedDigest(tx, anyDigest)
+ })
+ if err != nil {
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest)
+ }
+ if gotPrevious {
+ previous, err := digest.Parse(previousString)
+ if err != nil {
+ return void{}, err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+ anyDigest.String(), uncompressed.String()); err != nil {
+ return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+ res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+ uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+ if err != nil {
+ return "", err
+ }
+ if found {
+ d, err := digest.Parse(uncompressedString)
+ if err != nil {
+ return "", err
+ }
+ return d, nil
+ }
+ return "", nil
+ })
+ if err != nil {
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest)
+ }
+ if gotPrevious {
+ previous, err := digest.Parse(previousString)
+ if err != nil {
+ return void{}, err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+ }
+ }
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
+ tocDigest.String(), uncompressed.String()); err != nil {
+ return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+ transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+ return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+ location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+// - don’t record a compressor for a digest just because some remote author claims so
+// (e.g. because a manifest says so);
+// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+// in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
+ }
+ warned := false
+ if gotPrevious && previous != data.BaseVariantCompressor {
+ logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+ warned = true
+ }
+ if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+ if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+ return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+ }
+ if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+ return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+ }
+ } else {
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+ anyDigest.String(), data.BaseVariantCompressor); err != nil {
+ return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
+ }
+ }
+
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ if !warned { // Don’t warn twice about the same digest
+ prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
+ }
+ if found && data.SpecificVariantCompressor != prevSVC {
+ logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
+ }
+ }
+ annotations, err := json.Marshal(data.SpecificVariantAnnotations)
+ if err != nil {
+ return void{}, err
+ }
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
+ anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
+ return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
+ }
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
+// and returns the result of appending them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
+// with unknown compression.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+ v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
+ compressionData := blobinfocache.DigestCompressorData{
+ BaseVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ }
+ if v2Options != nil {
+ var baseVariantCompressor string
+ var specificVariantCompressor sql.NullString
+ var annotationBytes []byte
+ switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
+ "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
+ Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
+ case errors.Is(err, sql.ErrNoRows): // Do nothing
+ case err != nil:
+ return nil, fmt.Errorf("scanning compressor data: %w", err)
+ default:
+ compressionData.BaseVariantCompressor = baseVariantCompressor
+ if specificVariantCompressor.Valid && annotationBytes != nil {
+ compressionData.SpecificVariantCompressor = specificVariantCompressor.String
+ if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+ if template == nil {
+ return candidates, nil
+ }
+
+ rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
+ "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+ transport.Name(), scope.Opaque, digest.String())
+ if err != nil {
+ return nil, fmt.Errorf("looking up candidate locations: %w", err)
+ }
+ defer rows.Close()
+
+ rowAdded := false
+ for rows.Next() {
+ var location string
+ var time time.Time
+ if err := rows.Scan(&location, &time); err != nil {
+ return nil, fmt.Errorf("scanning candidate: %w", err)
+ }
+ candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time))
+ rowAdded = true
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("iterating through locations: %w", err)
+ }
+
+ if !rowAdded && v2Options != nil {
+ candidates = append(candidates, template.CandidateWithUnknownLocation())
+ }
+ return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport, scope) (if they still
+// exist, which is not guaranteed).
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+ return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+ v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+ var uncompressedDigest digest.Digest // = ""
+ res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
+ res := []prioritize.CandidateWithTime{}
+ res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
+ if err != nil {
+ return nil, err
+ }
+ if canSubstitute {
+ uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
+ if err != nil {
+ return nil, err
+ }
+ if uncompressedDigest != "" {
+ // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+ // (In the extreme, we could turn _everything_ this function does into a single query.
+ // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+ // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+ if err != nil {
+ return nil, fmt.Errorf("querying for other digests: %w", err)
+ }
+ defer rows.Close()
+ for rows.Next() {
+ var otherDigestString string
+ if err := rows.Scan(&otherDigestString); err != nil {
+ return nil, fmt.Errorf("scanning other digest: %w", err)
+ }
+ otherDigest, err := digest.Parse(otherDigestString)
+ if err != nil {
+ return nil, err
+ }
+ if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("iterating through other digests: %w", err)
+ }
+
+ if uncompressedDigest != primaryDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ return res, nil
+ })
+ if err != nil {
+ return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
+}
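
A standalone sketch of opening a database with the DSN options assembled in the sqliteOptions constant above (the file path is illustrative; the option names are real go-sqlite3 DSN parameters):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	// Mirrors sqliteOptions: local-time parsing, full fsync, foreign keys on,
	// and exclusive (write-locked) transactions.
	dsn := "/tmp/example-blobcache.sqlite" + "?_loc=auto&_sync=FULL&_foreign_keys=1&_txlock=exclusive"
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// sql.Open does not touch the file; Ping forces an actual connection.
	if err := db.Ping(); err != nil {
		panic(err)
	}
	fmt.Println("opened with exclusive-transaction options")
}
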
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index d5cfd8d31..782c86d06 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -5,12 +5,11 @@ import (
"compress/bzip2"
"fmt"
"io"
- "io/ioutil"
"github.com/containers/image/v5/pkg/compression/internal"
"github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -20,19 +19,27 @@ type Algorithm = types.Algorithm
var (
// Gzip compression.
- Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+ Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "",
+ []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
// Bzip2 compression.
- Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+ Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "",
+ []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
// Xz compression.
- Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+ Xz = internal.NewAlgorithm(types.XzAlgorithmName, "",
+ []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
// Zstd compression.
- Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+ Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "",
+ []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+ // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName,
+ nil, ZstdDecompressor, compressor.ZstdCompressor)
compressionAlgorithms = map[string]Algorithm{
- Gzip.Name(): Gzip,
- Bzip2.Name(): Bzip2,
- Xz.Name(): Xz,
- Zstd.Name(): Zstd,
+ Gzip.Name(): Gzip,
+ Bzip2.Name(): Bzip2,
+ Xz.Name(): Xz,
+ Zstd.Name(): Zstd,
+ ZstdChunked.Name(): ZstdChunked,
}
)
@@ -56,7 +63,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
- return ioutil.NopCloser(bzip2.NewReader(r)), nil
+ return io.NopCloser(bzip2.NewReader(r)), nil
}
// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
@@ -65,11 +72,11 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
- return ioutil.NopCloser(r), nil
+ return io.NopCloser(r), nil
}
// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
-func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
if level != nil {
return pgzip.NewWriterLevel(r, *level)
}
@@ -77,18 +84,35 @@ func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
}
// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
-func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
return nil, fmt.Errorf("bzip2 compression not supported")
}
// xzCompressor is a CompressorFunc for the xz compression algorithm.
-func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
return xz.NewWriter(r)
}
// CompressStream returns the compressor by its name
func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
- return internal.AlgorithmCompressor(algo)(dest, level)
+ m := map[string]string{}
+ return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata returns the compressor by its name.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// Such a partial decompression is not implemented by this package; it is consumed e.g. by
+// github.com/containers/storage/pkg/chunked .
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+ return internal.AlgorithmCompressor(algo)(dest, metadata, level)
}
// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
@@ -107,7 +131,8 @@ func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.R
var retAlgo Algorithm
var decompressor DecompressorFunc
for _, algo := range compressionAlgorithms {
- if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) {
+ prefix := internal.AlgorithmPrefix(algo)
+ if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
logrus.Debugf("Detected compression format %s", algo.Name())
retAlgo = algo
decompressor = internal.AlgorithmDecompressor(algo)
@@ -135,16 +160,16 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
decompressor, stream, err := DetectCompression(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "Error detecting compression")
+ return nil, false, fmt.Errorf("detecting compression: %w", err)
}
var res io.ReadCloser
if decompressor != nil {
res, err = decompressor(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "Error initializing decompression")
+ return nil, false, fmt.Errorf("initializing decompression: %w", err)
}
} else {
- res = ioutil.NopCloser(stream)
+ res = io.NopCloser(stream)
}
return res, decompressor != nil, nil
}
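For orientation, a minimal caller-side sketch of the new metadata-aware entry point (assuming the package's exported Gzip algorithm; plain gzip emits no such metadata, while a zstd:chunked compressor would fill the map with its TOC annotations):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	var dest bytes.Buffer
	metadata := map[string]string{} // filled on Close() by algorithms that produce integrity data

	w, err := compression.CompressStreamWithMetadata(&dest, metadata, compression.Gzip, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("example payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close must always be called
		panic(err)
	}
	fmt.Printf("compressed %d bytes; %d metadata entries\n", dest.Len(), len(metadata))
}
```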
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
index 6092a9517..e715705b4 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -3,8 +3,17 @@ package internal
import "io"
// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
+//
// The caller must call Close() on the stream (even if the input stream does not need closing!).
-type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
@@ -12,21 +21,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm struct {
- name string
- prefix []byte
- decompressor DecompressorFunc
- compressor CompressorFunc
+ name string
+ baseVariantName string
+ prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
+ decompressor DecompressorFunc
+ compressor CompressorFunc
}
// NewAlgorithm creates an Algorithm instance.
+// nontrivialBaseVariantName is typically "".
// This function exists so that Algorithm instances can only be created by code that
// is allowed to import this internal subpackage.
-func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+ baseVariantName := name
+ if nontrivialBaseVariantName != "" {
+ baseVariantName = nontrivialBaseVariantName
+ }
return Algorithm{
- name: name,
- prefix: prefix,
- decompressor: decompressor,
- compressor: compressor,
+ name: name,
+ baseVariantName: baseVariantName,
+ prefix: prefix,
+ decompressor: decompressor,
+ compressor: compressor,
}
}
@@ -35,22 +51,29 @@ func (c Algorithm) Name() string {
return c.name
}
+// BaseVariantName returns the name of the “base variant” of the compression algorithm.
+// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”).
+// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data.
+func (c Algorithm) BaseVariantName() string {
+ return c.baseVariantName
+}
+
// AlgorithmCompressor returns the compressor field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmCompressor(algo Algorithm) CompressorFunc {
return algo.compressor
}
// AlgorithmDecompressor returns the decompressor field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
return algo.decompressor
}
// AlgorithmPrefix returns the prefix field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmPrefix(algo Algorithm) []byte {
return algo.prefix
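A small sketch of how the base-variant relation is intended to be consumed; isVariantOf is a hypothetical helper, not part of this package:

```go
package example

import "github.com/containers/image/v5/pkg/compression/types"

// isVariantOf reports whether streams produced by algo are also valid streams
// of base; e.g. "zstd:chunked" output is consumable by a plain "zstd" reader.
func isVariantOf(algo, base types.Algorithm) bool {
	return algo.BaseVariantName() == base.Name()
}
```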
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
index f96eff2e3..43d03b601 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -11,3 +11,31 @@ type DecompressorFunc = internal.DecompressorFunc
// Algorithm is a compression algorithm provided and supported by pkg/compression.
// It can’t be supplied from the outside.
type Algorithm = internal.Algorithm
+
+const (
+ // GzipAlgorithmName is the name used by pkg/compression.Gzip.
+ // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ GzipAlgorithmName = "gzip"
+ // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+ // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ Bzip2AlgorithmName = "bzip2"
+ // XzAlgorithmName is the name used by pkg/compression.Xz.
+ // NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ XzAlgorithmName = "Xz"
+ // ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+ // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdAlgorithmName = "zstd"
+ // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+ // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked"
+)
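As a hedged illustration of these constants: a consumer that understands plain zstd can also accept zstd:chunked blobs, because of the base-variant relation (canConsumeAsZstd is a hypothetical name):

```go
package example

import "github.com/containers/image/v5/pkg/compression/types"

// canConsumeAsZstd is a sketch: zstd:chunked is a base variant of zstd,
// so both names are acceptable to a plain zstd decompressor.
func canConsumeAsZstd(algorithmName string) bool {
	switch algorithmName {
	case types.ZstdAlgorithmName, types.ZstdChunkedAlgorithmName:
		return true
	default:
		return false
	}
}
```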
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
index 962fe9676..39ae014d2 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
@@ -40,13 +40,13 @@ func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(dest)
}
-func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
+func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
el := zstd.EncoderLevelFromZstd(level)
return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}
// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
-func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
if level == nil {
return zstdWriter(r)
}
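For illustration, a sketch of passing an explicit level through the public API (compressZstdAtLevel is hypothetical; a nil level selects the encoder default):

```go
package example

import (
	"bytes"

	"github.com/containers/image/v5/pkg/compression"
)

// compressZstdAtLevel shows the *int level parameter of CompressStream.
func compressZstdAtLevel(data []byte, level int) ([]byte, error) {
	var dest bytes.Buffer
	w, err := compression.CompressStream(&dest, compression.Zstd, &level)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		_ = w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return dest.Bytes(), nil
}
```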
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index b84aac6e4..da2238a0b 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -3,21 +3,25 @@ package config
import (
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
+ "io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/multierr"
+ "github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/ioutils"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
- "github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -31,11 +35,6 @@ type dockerConfigFile struct {
CredHelpers map[string]string `json:"credHelpers,omitempty"`
}
-type authPath struct {
- path string
- legacyFormat bool
-}
-
var (
defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
xdgConfigHomePath = filepath.FromSlash("containers/auth.json")
@@ -51,42 +50,17 @@ var (
ErrNotSupported = errors.New("not supported")
)
-// SetAuthentication stores the username and password in the credential helper or file
-func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
- helpers, err := sysregistriesv2.CredentialHelpers(sys)
- if err != nil {
- return err
- }
+// authPath combines a path to a file with container registry credentials,
+// along with expected properties of that path (currently just whether it's
+// legacy format or not).
+type authPath struct {
+ path string
+ legacyFormat bool
+}
- // Make sure to collect all errors.
- var multiErr error
- for _, helper := range helpers {
- var err error
- switch helper {
- // Special-case the built-in helpers for auth files.
- case sysregistriesv2.AuthenticationFileHelper:
- err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
- if ch, exists := auths.CredHelpers[registry]; exists {
- return false, setAuthToCredHelper(ch, registry, username, password)
- }
- creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
- newCreds := dockerAuthConfig{Auth: creds}
- auths.AuthConfigs[registry] = newCreds
- return true, nil
- })
- // External helpers.
- default:
- err = setAuthToCredHelper(helper, registry, username, password)
- }
- if err != nil {
- multiErr = multierror.Append(multiErr, err)
- logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", registry, helper, err)
- continue
- }
- logrus.Debugf("Stored credentials for %s in credential helper %s", registry, helper)
- return nil
- }
- return multiErr
+// newAuthPathDefault constructs an authPath in non-legacy format.
+func newAuthPathDefault(path string) authPath {
+ return authPath{path: path, legacyFormat: false}
}
// GetAllCredentials returns the registry credentials for all registries stored
@@ -96,10 +70,11 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
- allRegistries := make(map[string]bool)
- addRegistry := func(s string) {
- allRegistries[s] = true
- }
+ allKeys := set.New[string]()
+
+ // To use GetCredentials, we must at least convert the URL forms into host names.
+ // While we're at it, we’ll also canonicalize docker.io to the standard format.
+ normalizedDockerIORegistry := normalizeRegistry("docker.io")
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
@@ -110,57 +85,57 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
- // readJSONFile returns an empty map in case the path doesn't exist.
- auths, err := readJSONFile(path.path, path.legacyFormat)
+ // parse returns an empty map in case the path doesn't exist.
+ fileContents, err := path.parse()
if err != nil {
- return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
+ return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
// walk the map.
- for registry := range auths.CredHelpers {
- addRegistry(registry)
+ for registry := range fileContents.CredHelpers {
+ allKeys.Add(registry)
}
- for registry := range auths.AuthConfigs {
- addRegistry(registry)
+ for key := range fileContents.AuthConfigs {
+ key := normalizeAuthFileKey(key, path.legacyFormat)
+ if key == normalizedDockerIORegistry {
+ key = "docker.io"
+ }
+ allKeys.Add(key)
}
}
// External helpers.
default:
- creds, err := listAuthsFromCredHelper(helper)
+ creds, err := listCredsInCredHelper(helper)
if err != nil {
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
- }
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- addRegistry(registry)
+ if errors.Is(err, exec.ErrNotFound) {
+ creds = nil // It's okay if the helper doesn't exist.
+ } else {
+ return nil, err
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- default:
- return nil, err
+ }
+ for registry := range creds {
+ allKeys.Add(registry)
}
}
}
// Now use `GetCredentials` to get the specific auth configs for each
// previously listed registry.
- authConfigs := make(map[string]types.DockerAuthConfig)
- for registry := range allRegistries {
- authConf, err := GetCredentials(sys, registry)
+ allCreds := make(map[string]types.DockerAuthConfig)
+ for _, key := range allKeys.Values() {
+ creds, err := GetCredentials(sys, key)
if err != nil {
- if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
- // Ignore if the credentials could not be found (anymore).
- continue
- }
// Note: we rely on the logging in `GetCredentials`.
return nil, err
}
- authConfigs[registry] = authConf
+ if creds != (types.DockerAuthConfig{}) {
+ allCreds[key] = creds
+ }
}
- return authConfigs, nil
+ return allCreds, nil
}
// getAuthFilePaths returns a slice of authPaths based on the system context
@@ -169,65 +144,87 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// by tests.
func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
paths := []authPath{}
- pathToAuth, lf, err := getPathToAuth(sys)
+ pathToAuth, userSpecifiedPath, err := getPathToAuth(sys)
if err == nil {
- paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
+ paths = append(paths, pathToAuth)
} else {
// Error means that the path set for XDG_RUNTIME_DIR does not exist
// but we don't want to completely fail in the case that the user is pulling a public image
// Logging the error as a warning instead and moving on to pulling the image
logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
}
- xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
- if xdgCfgHome == "" {
- xdgCfgHome = filepath.Join(homeDir, ".config")
- }
- paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
- if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
- paths = append(paths,
- authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
- )
- } else {
+ if !userSpecifiedPath {
+ xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+ if xdgCfgHome == "" {
+ xdgCfgHome = filepath.Join(homeDir, ".config")
+ }
+ paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath)))
+ if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+ paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json")))
+ } else {
+ paths = append(paths,
+ newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)),
+ )
+ }
paths = append(paths,
- authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
+ authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
)
}
- paths = append(paths,
- authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
- )
return paths
}
-// GetCredentials returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json
-// file or .docker/config.json, including support for OAuth2 and IdentityToken.
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
-func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
- return getCredentialsWithHomeDir(sys, registry, homedir.Get())
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
+//
+// GetCredentialsForRef should almost always be used in favor of this API.
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
+ return getCredentialsWithHomeDir(sys, key, homedir.Get())
}
-// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials,
-// it exists only to allow testing it with an artificial home directory.
-func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
+// GetCredentialsForRef returns the registry credentials necessary for
+// accessing ref on the registry ref points to,
+// appropriate for sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
+ return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
+}
+
+// getCredentialsWithHomeDir is an internal implementation detail of
+// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
+// with an artificial home directory.
+func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
+ _, err := validateKey(key)
+ if err != nil {
+ return types.DockerAuthConfig{}, err
+ }
+
if sys != nil && sys.DockerAuthConfig != nil {
- logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry)
+ logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
return *sys.DockerAuthConfig, nil
}
+ var registry string // We compute this once because it is used in several places.
+ if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
+ registry = key[:firstSlash]
+ } else {
+ registry = key
+ }
+
// Anonymous function to query credentials from auth files.
- getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
+ getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
for _, path := range getAuthFilePaths(sys, homeDir) {
- authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
+ creds, err := findCredentialsInFile(key, registry, path)
if err != nil {
- return types.DockerAuthConfig{}, err
+ return types.DockerAuthConfig{}, "", err
}
- if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
- return authConfig, nil
+ if creds != (types.DockerAuthConfig{}) {
+ return creds, path.path, nil
}
}
- return types.DockerAuthConfig{}, nil
+ return types.DockerAuthConfig{}, "", nil
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
@@ -235,117 +232,201 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir strin
return types.DockerAuthConfig{}, err
}
- var multiErr error
+ var multiErr []error
for _, helper := range helpers {
- var creds types.DockerAuthConfig
- var err error
+ var (
+ creds types.DockerAuthConfig
+ helperKey string
+ credHelperPath string
+ err error
+ )
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- creds, err = getCredentialsFromAuthFiles()
+ helperKey = key
+ creds, credHelperPath, err = getCredentialsFromAuthFiles()
// External helpers.
default:
- creds, err = getAuthFromCredHelper(helper, registry)
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers, but a "registry" is a valid parent of "key".
+ helperKey = registry
+ creds, err = getCredsFromCredHelper(helper, registry)
}
if err != nil {
- logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err)
- multiErr = multierror.Append(multiErr, err)
+ logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
+ multiErr = append(multiErr, err)
continue
}
- if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
- continue
+ if creds != (types.DockerAuthConfig{}) {
+ msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
+ if credHelperPath != "" {
+ msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+ }
+ logrus.Debug(msg)
+ return creds, nil
}
- logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper)
- return creds, nil
}
if multiErr != nil {
- return types.DockerAuthConfig{}, multiErr
+ return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}
- logrus.Debugf("No credentials for %s found", registry)
+ logrus.Debugf("No credentials for %s found", key)
return types.DockerAuthConfig{}, nil
}
-// GetAuthentication returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json file or
-// .docker/config.json
+// GetAuthentication returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
//
// Deprecated: This API only has support for username and password. To get the
-// support for oauth2 in docker registry authentication, we added the new
-// GetCredentials API. The new API should be used and this API is kept to
+// support for oauth2 in container registry authentication, we added the new
+// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
// maintain backward compatibility.
-func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
- return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
+func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
+ return getAuthenticationWithHomeDir(sys, key, homedir.Get())
}
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
// it exists only to allow testing it with an artificial home directory.
-func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
- auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
+func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
+ creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
if err != nil {
return "", "", err
}
- if auth.IdentityToken != "" {
- return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
+ if creds.IdentityToken != "" {
+ return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
+ }
+ return creds.Username, creds.Password, nil
+}
+
+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+// Returns a human-readable description of the location that was updated.
+// NOTE: The return value is only intended to be read by humans; its form is not an API,
+// it may change (or new forms can be added) any time.
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+ helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
+ if err != nil {
+ return "", err
+ }
+
+ // Make sure to collect all errors.
+ var multiErr []error
+ for _, helper := range helpers {
+ var desc string
+ var err error
+ switch helper {
+ // Special-case the built-in helpers for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ if ch, exists := fileContents.CredHelpers[key]; exists {
+ if isNamespaced {
+ return false, "", unsupportedNamespaceErr(ch)
+ }
+ desc, err := setCredsInCredHelper(ch, key, username, password)
+ if err != nil {
+ return false, "", err
+ }
+ return false, desc, nil
+ }
+ creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ newCreds := dockerAuthConfig{Auth: creds}
+ fileContents.AuthConfigs[key] = newCreds
+ return true, "", nil
+ })
+ // External helpers.
+ default:
+ if isNamespaced {
+ err = unsupportedNamespaceErr(helper)
+ } else {
+ desc, err = setCredsInCredHelper(helper, key, username, password)
+ }
+ }
+ if err != nil {
+ multiErr = append(multiErr, err)
+ logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
+ continue
+ }
+ logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
+ return desc, nil
}
- return auth.Username, auth.Password, nil
+ return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr)
+}
+
+func unsupportedNamespaceErr(helper string) error {
+ return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
+// SetAuthentication stores the username and password in the credential helper or file.
+// See the documentation of SetCredentials for the format of "key".
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+ _, err := SetCredentials(sys, key, username, password)
+ return err
}
-// RemoveAuthentication removes credentials for `registry` from all possible
+// RemoveAuthentication removes credentials for `key` from all possible
// sources such as credential helpers and auth files.
-func RemoveAuthentication(sys *types.SystemContext, registry string) error {
- helpers, err := sysregistriesv2.CredentialHelpers(sys)
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+func RemoveAuthentication(sys *types.SystemContext, key string) error {
+ helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
if err != nil {
return err
}
- var multiErr error
isLoggedIn := false
- removeFromCredHelper := func(helper string) {
- err := deleteAuthFromCredHelper(helper, registry)
+ removeFromCredHelper := func(helper string) error {
+ if isNamespaced {
+ logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
+ return nil
+ }
+ err := deleteCredsFromCredHelper(helper, key)
if err == nil {
- logrus.Debugf("Credentials for %q were deleted from credential helper %s", registry, helper)
+ logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
- return
+ return nil
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
- logrus.Debugf("Not logged in to %s with credential helper %s", registry, helper)
- return
+ logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
+ return nil
}
- multiErr = multierror.Append(multiErr, errors.Wrapf(err, "error removing credentials for %s from credential helper %s", registry, helper))
+ return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)
}
+ var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
- if innerHelper, exists := auths.CredHelpers[registry]; exists {
- removeFromCredHelper(innerHelper)
+ _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ var helperErr error
+ if innerHelper, exists := fileContents.CredHelpers[key]; exists {
+ helperErr = removeFromCredHelper(innerHelper)
}
- if _, ok := auths.AuthConfigs[registry]; ok {
- isLoggedIn = true
- delete(auths.AuthConfigs, registry)
- } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+ if _, ok := fileContents.AuthConfigs[key]; ok {
isLoggedIn = true
- delete(auths.AuthConfigs, normalizeRegistry(registry))
+ delete(fileContents.AuthConfigs, key)
}
- return true, multiErr
+ return true, "", helperErr
})
if err != nil {
- multiErr = multierror.Append(multiErr, err)
+ multiErr = append(multiErr, err)
}
// External helpers.
default:
- removeFromCredHelper(helper)
+ if err := removeFromCredHelper(helper); err != nil {
+ multiErr = append(multiErr, err)
+ }
}
}
if multiErr != nil {
- return multiErr
+ return multierr.Format("errors removing credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}
if !isLoggedIn {
return ErrNotLoggedIn
@@ -357,194 +438,348 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error {
// RemoveAllAuthentication deletes all the credentials stored in credential
// helpers and auth files.
func RemoveAllAuthentication(sys *types.SystemContext) error {
- helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false)
if err != nil {
return err
}
- var multiErr error
+ var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
- for registry, helper := range auths.CredHelpers {
+ _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ for registry, helper := range fileContents.CredHelpers {
// Helpers in auth files are expected
// to exist, so no special treatment
// for them.
- if err := deleteAuthFromCredHelper(helper, registry); err != nil {
- return false, err
+ if err := deleteCredsFromCredHelper(helper, registry); err != nil {
+ return false, "", err
}
}
- auths.CredHelpers = make(map[string]string)
- auths.AuthConfigs = make(map[string]dockerAuthConfig)
- return true, nil
+ fileContents.CredHelpers = make(map[string]string)
+ fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
+ return true, "", nil
})
// External helpers.
default:
var creds map[string]string
- creds, err = listAuthsFromCredHelper(helper)
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- err = deleteAuthFromCredHelper(helper, registry)
- if err != nil {
- break
- }
+ creds, err = listCredsInCredHelper(helper)
+ if err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ // It's okay if the helper doesn't exist.
+ continue
+ } else {
+ break
+ }
+ }
+ for registry := range creds {
+ err = deleteCredsFromCredHelper(helper, registry)
+ if err != nil {
+ break
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- continue
- default:
- // fall through
}
}
if err != nil {
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
- multiErr = multierror.Append(multiErr, err)
+ multiErr = append(multiErr, err)
continue
}
logrus.Debugf("All credentials removed from credential helper %s", helper)
}
- return multiErr
+ if multiErr != nil {
+ return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr)
+ }
+ return nil
}
-func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
+// prepareForEdit processes sys and key (if keyRelevant) to return:
+// - a list of credential helpers
+// - a function which can be used to edit the JSON file
+// - the key value to actually use in credential helpers / JSON
+// - a boolean which is true if key is namespaced (and should not be used with credential helpers).
+func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) {
+ var isNamespaced bool
+ if keyRelevant {
+ ns, err := validateKey(key)
+ if err != nil {
+ return nil, nil, "", false, err
+ }
+ isNamespaced = ns
+ }
+
+ if sys != nil && sys.DockerCompatAuthFilePath != "" {
+ if sys.AuthFilePath != "" {
+ return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+ }
+ if keyRelevant {
+ if isNamespaced {
+ return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key)
+ }
+ if key == "docker.io" {
+ key = "https://index.docker.io/v1/"
+ }
+ }
+
+ // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them.
+ return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil
+ }
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return nil, nil, "", false, err
+ }
+
+ return helpers, modifyJSON, key, isNamespaced, nil
+}
+
+func listCredsInCredHelper(credHelper string) (map[string]string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
return helperclient.List(p)
}
-// getPathToAuth gets the path of the auth.json file used for reading and writting credentials
-// returns the path, and a bool specifies whether the file is in legacy format
-func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials,
+// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults)
+func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) {
return getPathToAuthWithOS(sys, runtime.GOOS)
}
// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
// it exists only to allow testing it with an artificial runtime.GOOS.
-func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
+func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) {
if sys != nil {
+ if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" {
+ return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+ }
if sys.AuthFilePath != "" {
- return sys.AuthFilePath, false, nil
+ return newAuthPathDefault(sys.AuthFilePath), true, nil
+ }
+ // When reading, we can process auth.json and Docker’s config.json with the same code.
+ // When writing, prepareForEdit chooses an appropriate jsonEditor implementation.
+ if sys.DockerCompatAuthFilePath != "" {
+ return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil
}
if sys.LegacyFormatAuthFilePath != "" {
- return sys.LegacyFormatAuthFilePath, true, nil
+ return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
}
- if sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
+ // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME
+ if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" {
+ return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
}
}
- if goOS == "windows" || goOS == "darwin" {
- return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
+ if goOS != "linux" {
+ return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
}
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
- // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
- _, err := os.Stat(runtimeDir)
- if os.IsNotExist(err) {
+ // We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case.
+ err := fileutils.Exists(runtimeDir)
+ if errors.Is(err, fs.ErrNotExist) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
- return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+ return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR: %w", runtimeDir, err)
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
- return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
+ return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil
}
- return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
+ return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
}
-// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
+// parse unmarshals the credentials stored in the auth.json file and returns it
// or returns an empty dockerConfigFile data structure if auth.json does not exist
-// if the file exists and is empty, readJSONFile returns an error
-func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
- var auths dockerConfigFile
+// if the file exists and is empty, this function returns an error.
+func (path authPath) parse() (dockerConfigFile, error) {
+ var fileContents dockerConfigFile
- raw, err := ioutil.ReadFile(path)
+ raw, err := os.ReadFile(path.path)
if err != nil {
if os.IsNotExist(err) {
- auths.AuthConfigs = map[string]dockerAuthConfig{}
- return auths, nil
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
+ return fileContents, nil
}
return dockerConfigFile{}, err
}
- if legacyFormat {
- if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+ if path.legacyFormat {
+ if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil {
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
}
- return auths, nil
+ return fileContents, nil
}
- if err = json.Unmarshal(raw, &auths); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+ if err = json.Unmarshal(raw, &fileContents); err != nil {
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
}
- if auths.AuthConfigs == nil {
- auths.AuthConfigs = map[string]dockerAuthConfig{}
+ if fileContents.AuthConfigs == nil {
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
}
- if auths.CredHelpers == nil {
- auths.CredHelpers = make(map[string]string)
+ if fileContents.CredHelpers == nil {
+ fileContents.CredHelpers = make(map[string]string)
}
- return auths, nil
+ return fileContents, nil
}
-// modifyJSON writes to auth.json if the dockerConfigFile has been updated
-func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
- path, legacyFormat, err := getPathToAuth(sys)
+// modifyJSON finds an auth.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
+ path, _, err := getPathToAuth(sys)
if err != nil {
- return err
+ return "", err
}
- if legacyFormat {
- return fmt.Errorf("writes to %s using legacy format are not supported", path)
+ if path.legacyFormat {
+ return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path)
}
- dir := filepath.Dir(path)
+ dir := filepath.Dir(path.path)
if err = os.MkdirAll(dir, 0700); err != nil {
- return err
+ return "", err
}
- auths, err := readJSONFile(path, false)
+ fileContents, err := path.parse()
if err != nil {
- return errors.Wrapf(err, "error reading JSON file %q", path)
+ return "", fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
- updated, err := editor(&auths)
+ updated, description, err := editor(&fileContents)
if err != nil {
- return errors.Wrapf(err, "error updating %q", path)
+ return "", fmt.Errorf("updating %q: %w", path.path, err)
}
if updated {
- newData, err := json.MarshalIndent(auths, "", "\t")
+ newData, err := json.MarshalIndent(fileContents, "", "\t")
if err != nil {
- return errors.Wrapf(err, "error marshaling JSON %q", path)
+ return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err)
}
- if err = ioutil.WriteFile(path, newData, 0600); err != nil {
- return errors.Wrapf(err, "error writing to file %q", path)
+ if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil {
+ return "", fmt.Errorf("writing to file %q: %w", path.path, err)
}
}
- return nil
+ if description == "" {
+ description = path.path
+ }
+ return description, nil
}
-func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
+// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
+ if sys == nil || sys.DockerCompatAuthFilePath == "" {
+ return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set")
+ }
+ path := sys.DockerCompatAuthFilePath
+
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return "", err
+ }
+
+ // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions.
+ var rawContents map[string]json.RawMessage
+ originalBytes, err := os.ReadFile(path)
+ switch {
+ case err == nil:
+ if err := json.Unmarshal(originalBytes, &rawContents); err != nil {
+ return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
+ }
+ case errors.Is(err, fs.ErrNotExist):
+ rawContents = map[string]json.RawMessage{}
+ default: // err != nil
+ return "", err
+ }
+
+ syntheticContents := dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{},
+ CredHelpers: map[string]string{},
+ }
+ // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably
+ // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with
+ // unexpected case.
+ if rawAuths, ok := rawContents["auths"]; ok {
+ // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field
+ // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials).
+ // It might make sense to track which entries of "auths" we actually modified, and to not touch any others.
+ if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil {
+ return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err)
+ }
+ }
+ if rawCH, ok := rawContents["credHelpers"]; ok {
+ if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil {
+ return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err)
+ }
+ }
+
+ updated, description, err := editor(&syntheticContents)
+ if err != nil {
+ return "", fmt.Errorf("updating %q: %w", path, err)
+ }
+ if updated {
+ rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t")
+ if err != nil {
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+ }
+ rawContents["auths"] = rawAuths
+ // We never modify syntheticContents.CredHelpers, so we don’t need to update it.
+ newData, err := json.MarshalIndent(rawContents, "", "\t")
+ if err != nil {
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+ }
+
+ if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
+ return "", fmt.Errorf("writing to file %q: %w", path, err)
+ }
+ }
+
+ if description == "" {
+ description = path
+ }
+ return description, nil
+}
+
+func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds, err := helperclient.Get(p, registry)
if err != nil {
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
+ err = nil
+ }
return types.DockerAuthConfig{}, err
}
- return types.DockerAuthConfig{
- Username: creds.Username,
- Password: creds.Secret,
- }, nil
+
+ switch creds.Username {
+ case "":
+ return types.DockerAuthConfig{
+ IdentityToken: creds.Secret,
+ }, nil
+ default:
+ return types.DockerAuthConfig{
+ Username: creds.Username,
+ Password: creds.Secret,
+ }, nil
+ }
}
-func setAuthToCredHelper(credHelper, registry, username, password string) error {
+// setCredsInCredHelper stores (username, password) for registry in credHelper.
+// Returns a human-readable description of the destination, to be returned by SetCredentials.
+func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds := &credentials.Credentials{
@@ -552,62 +787,114 @@ func setAuthToCredHelper(credHelper, registry, username, password string) error
Username: username,
Secret: password,
}
- return helperclient.Store(p, creds)
+ if err := helperclient.Store(p, creds); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("credential helper: %s", credHelper), nil
}
-func deleteAuthFromCredHelper(credHelper, registry string) error {
+func deleteCredsFromCredHelper(credHelper, registry string) error {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
return helperclient.Erase(p, registry)
}
-// findAuthentication looks for auth of registry in path
-func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
- auths, err := readJSONFile(path, legacyFormat)
+// findCredentialsInFile looks for credentials matching "key"
+// (which is "registry" or a namespace in "registry") in "path".
+func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) {
+ fileContents, err := path.parse()
if err != nil {
- return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path)
+ return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
// First try cred helpers. They should always be normalized.
- if ch, exists := auths.CredHelpers[registry]; exists {
- return getAuthFromCredHelper(ch, registry)
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers.
+ if ch, exists := fileContents.CredHelpers[registry]; exists {
+ logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
+ return getCredsFromCredHelper(ch, registry)
+ }
+
+ // Support sub-registry namespaces in auth.
+ // (This is not a feature of ~/.docker/config.json; we support it even for
+ // those files as an extension.)
+ var keys []string
+ if !path.legacyFormat {
+ keys = authKeysForKey(key)
+ } else {
+ keys = []string{registry}
}
- // I'm feeling lucky
- if val, exists := auths.AuthConfigs[registry]; exists {
- return decodeDockerAuth(val)
+ // Repo or namespace keys are only supported as exact matches. For registry
+ // keys we prefer exact matches as well.
+ for _, key := range keys {
+ if val, exists := fileContents.AuthConfigs[key]; exists {
+ return decodeDockerAuth(path.path, key, val)
+ }
}
// bad luck; let's normalize the entries first
+ // This primarily happens for legacyFormat, which for a time used API URLs
+// (http[s]://…/v1/) as keys.
+ // Secondarily, (docker login) accepted URLs with no normalization for
+ // several years, and matched registry hostnames against that, so support
+ // those entries even in non-legacyFormat ~/.docker/config.json.
+ // The docker.io registry still uses the /v1/ key with a special host name,
+ // so account for that as well.
registry = normalizeRegistry(registry)
- normalizedAuths := map[string]dockerAuthConfig{}
- for k, v := range auths.AuthConfigs {
- normalizedAuths[normalizeRegistry(k)] = v
+ for k, v := range fileContents.AuthConfigs {
+ if normalizeAuthFileKey(k, path.legacyFormat) == registry {
+ return decodeDockerAuth(path.path, k, v)
+ }
}
- if val, exists := normalizedAuths[registry]; exists {
- return decodeDockerAuth(val)
+ // Only log this if we found nothing; getCredentialsWithHomeDir logs the
+ // source of found data.
+ logrus.Debugf("No credentials matching %s found in %s", key, path.path)
+ return types.DockerAuthConfig{}, nil
+}
+
+// authKeysForKey returns the keys matching a provided auth file key, in order
+// from the best match to worst. For example,
+// when given a repository key "quay.io/repo/ns/image", it returns
+// - quay.io/repo/ns/image
+// - quay.io/repo/ns
+// - quay.io/repo
+// - quay.io
+func authKeysForKey(key string) (res []string) {
+ for {
+ res = append(res, key)
+
+ lastSlash := strings.LastIndex(key, "/")
+ if lastSlash == -1 {
+ break
+ }
+ key = key[:lastSlash]
}
- return types.DockerAuthConfig{}, nil
+ return res
}
-// decodeDockerAuth decodes the username and password, which is
-// encoded in base64.
-func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+// decodeDockerAuth decodes the username and password from conf,
+// which is entry key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
return types.DockerAuthConfig{}, err
}
- parts := strings.SplitN(string(decoded), ":", 2)
- if len(parts) != 2 {
+ user, passwordPart, valid := strings.Cut(string(decoded), ":")
+ if !valid {
// if it's invalid just skip, as docker does
+ if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+ logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+ } else {
+ logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+ }
return types.DockerAuthConfig{}, nil
}
- user := parts[0]
- password := strings.Trim(parts[1], "\x00")
+ password := strings.Trim(passwordPart, "\x00")
return types.DockerAuthConfig{
Username: user,
Password: password,
@@ -615,27 +902,49 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
}, nil
}
-// convertToHostname converts a registry url which has http|https prepended
-// to just an hostname.
-// Copied from github.com/docker/docker/registry/auth.go
-func convertToHostname(url string) string {
- stripped := url
- if strings.HasPrefix(url, "http://") {
- stripped = strings.TrimPrefix(url, "http://")
- } else if strings.HasPrefix(url, "https://") {
- stripped = strings.TrimPrefix(url, "https://")
- }
+// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
+// the resulting registry.
+func normalizeAuthFileKey(key string, legacyFormat bool) string {
+ stripped := strings.TrimPrefix(key, "http://")
+ stripped = strings.TrimPrefix(stripped, "https://")
- nameParts := strings.SplitN(stripped, "/", 2)
+ if legacyFormat || stripped != key {
+ stripped, _, _ = strings.Cut(stripped, "/")
+ }
- return nameParts[0]
+ return normalizeRegistry(stripped)
}
+// normalizeRegistry returns the canonical host name for known docker.io
+// aliases and leaves any other registry unchanged.
func normalizeRegistry(registry string) string {
- normalized := convertToHostname(registry)
- switch normalized {
+ switch registry {
case "registry-1.docker.io", "docker.io":
return "index.docker.io"
}
- return normalized
+ return registry
+}
+
+// validateKey verifies that the input key does not have a disallowed prefix
+// and returns an indicator of whether the key is namespaced.
+func validateKey(key string) (bool, error) {
+ if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
+ return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
+ }
+
+ // Ideally this should only accept explicitly valid keys, compare
+ // validateIdentityRemappingPrefix. For now, just reject values that look
+ // like tagged or digested values.
+ if strings.ContainsRune(key, '@') {
+ return false, fmt.Errorf(`key %s contains a '@' character`, key)
+ }
+
+ firstSlash := strings.IndexRune(key, '/')
+ isNamespaced := firstSlash != -1
+ // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
+ if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
+ return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
+ }
+ // check if the provided key contains one or more subpaths.
+ return isNamespaced, nil
}
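To tie the credential changes together, a caller-side sketch (lookupExample and the quay.io key are illustrative): GetCredentials validates the key, and findCredentialsInFile tries the exact key first, then walks up to the registry host; external credential helpers only ever see the host.

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

// lookupExample: for the key "quay.io/repo/ns/image", auth files are
// consulted in the order quay.io/repo/ns/image, quay.io/repo/ns,
// quay.io/repo, quay.io, returning the most specific match.
func lookupExample(sys *types.SystemContext) error {
	creds, err := config.GetCredentials(sys, "quay.io/repo/ns/image")
	if err != nil {
		return err
	}
	if creds == (types.DockerAuthConfig{}) {
		fmt.Println("no credentials found")
		return nil
	}
	fmt.Println("found credentials for user", creds.Username)
	return nil
}
```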
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
deleted file mode 100644
index 5bbfb450f..000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/internal/pkg/keyctl"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// NOTE: none of the functions here are currently used. If we ever want to
-// reenable keyring support, we should introduce a similar built-in credential
-// helpers as for `sysregistriesv2.AuthenticationFileHelper`.
-
-const keyDescribePrefix = "container-registry-login:" // nolint
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) { // nolint
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return "", "", err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return "", "", err
- }
- authData, err := key.Get()
- if err != nil {
- return "", "", err
- }
- parts := strings.SplitN(string(authData), "\x00", 2)
- if len(parts) != 2 {
- return "", "", nil
- }
- return parts[0], parts[1], nil
-}
-
-func deleteAuthFromKernelKeyring(registry string) error { // nolint
- userkeyring, err := keyctl.UserKeyring()
-
- if err != nil {
- return err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return err
- }
- return key.Unlink()
-}
-
-func removeAllAuthFromKernelKeyring() error { // nolint
- keys, err := keyctl.ReadUserKeyring()
- if err != nil {
- return err
- }
-
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return err
- }
-
- for _, k := range keys {
- keyAttr, err := k.Describe()
- if err != nil {
- return err
- }
- // split string "type;uid;gid;perm;description"
- keyAttrs := strings.SplitN(keyAttr, ";", 5)
- if len(keyAttrs) < 5 {
- return errors.Errorf("Key attributes of %d are not available", k.ID())
- }
- keyDescribe := keyAttrs[4]
- if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
- err := keyctl.Unlink(userkeyring, k)
- if err != nil {
- return errors.Wrapf(err, "error unlinking key %d", k.ID())
- }
- logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
- }
- }
- return nil
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error { // nolint
- keyring, err := keyctl.SessionKeyring()
- if err != nil {
- return err
- }
- id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
- if err != nil {
- return err
- }
-
- // sets all permission(view,read,write,search,link,set attribute) for current user
- // it enables the user to search the key after it linked to user keyring and unlinked from session keyring
- err = keyctl.SetPerm(id, keyctl.PermUserAll)
- if err != nil {
- return err
- }
- // link the key to userKeyring
- userKeyring, err := keyctl.UserKeyring()
- if err != nil {
- return errors.Wrapf(err, "error getting user keyring")
- }
- err = keyctl.Link(userKeyring, id)
- if err != nil {
- return errors.Wrapf(err, "error linking the key to user keyring")
- }
- // unlink the key from session keyring
- err = keyctl.Unlink(keyring, id)
- if err != nil {
- return errors.Wrapf(err, "error unlinking the key from session keyring")
- }
- return nil
-}
-
-func genDescription(registry string) string { // nolint
- return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
deleted file mode 100644
index 9b0e8bee2..000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !linux
-// +build !386 !amd64
-
-package config
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) {
- return "", "", ErrNotSupported
-}
-
-func deleteAuthFromKernelKeyring(registry string) error {
- return ErrNotSupported
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error {
- return ErrNotSupported
-}
-
-func removeAllAuthFromKernelKeyring() error {
- return ErrNotSupported
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
index b64f44674..9317600e5 100644
--- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -1,17 +1,19 @@
package shortnames
import (
+ "errors"
"fmt"
"os"
+ "slices"
"strings"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/manifoldco/promptui"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/term"
)
// IsShortName returns true if the specified input is a "short name". A "short
@@ -20,9 +22,9 @@ import (
// short names.
//
// Examples:
-// * short names: "image:tag", "library/fedora"
-// * not short names: "quay.io/image", "localhost/image:tag",
-// "server.org:5000/lib/image", "image@sha256:..."
+// - short names: "image:tag", "library/fedora"
+// - not short names: "quay.io/image", "localhost/image:tag",
+// "server.org:5000/lib/image", "image@sha256:..."
func IsShortName(input string) bool {
isShort, _, _ := parseUnnormalizedShortName(input)
return isShort
@@ -33,12 +35,12 @@ func IsShortName(input string) bool {
func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
ref, err := reference.Parse(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+ return false, nil, fmt.Errorf("cannot parse input: %q: %w", input, err)
}
named, ok := ref.(reference.Named)
if !ok {
- return true, nil, errors.Errorf("%q is not a named reference", input)
+ return true, nil, fmt.Errorf("%q is not a named reference", input)
}
registry := reference.Domain(named)
@@ -47,7 +49,7 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
named, err = reference.ParseNormalizedNamed(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+ return false, nil, fmt.Errorf("cannot normalize input: %q: %w", input, err)
}
return false, named, nil
}
@@ -59,16 +61,12 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
// the tag or digest and stores it in the return values so that both can be
// re-added to a possibly resolved alias or USRs at a later point.
func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) {
- normalized = named
-
- tagged, isT := named.(reference.NamedTagged)
- if isT {
+ if tagged, ok := named.(reference.NamedTagged); ok {
isTagged = true
tag = tagged.Tag()
}
- digested, isD := named.(reference.Digested)
- if isD {
+ if digested, ok := named.(reference.Digested); ok {
isDigested = true
digest = digested.Digest()
}
@@ -87,7 +85,7 @@ func Add(ctx *types.SystemContext, name string, value reference.Named) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
}
@@ -102,7 +100,7 @@ func Remove(ctx *types.SystemContext, name string) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.RemoveShortNameAlias(ctx, name)
}
@@ -118,6 +116,7 @@ type Resolved struct {
}
func (r *Resolved) addCandidate(named reference.Named) {
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
}
@@ -138,6 +137,8 @@ const (
rationaleUSR
// Resolved value has been selected by the user (via the prompt).
rationaleUserSelection
+ // Resolved value has been enforced to use Docker Hub (via SystemContext).
+ rationaleEnforcedDockerHub
)
// Description returns a human-readable description about the resolution
@@ -152,6 +153,8 @@ func (r *Resolved) Description() string {
return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
case rationaleUSR:
return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
+ case rationaleEnforcedDockerHub:
+ return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription)
case rationaleUserSelection, rationaleNone:
fallthrough
default:
@@ -165,26 +168,17 @@ func (r *Resolved) Description() string {
// Note that nil is returned if len(pullErrors) == 0. Otherwise, the number of
// pull errors must equal the number of pull candidates.
func (r *Resolved) FormatPullErrors(pullErrors []error) error {
- if len(pullErrors) >= 0 && len(pullErrors) != len(r.PullCandidates) {
- pullErrors = append(pullErrors,
- errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
- len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
+ if len(pullErrors) == 0 {
+ return nil
}
- switch len(pullErrors) {
- case 0:
- return nil
- case 1:
- return pullErrors[0]
- default:
- var sb strings.Builder
- sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
- for _, e := range pullErrors {
- sb.WriteString("\n * ")
- sb.WriteString(e.Error())
- }
- return errors.New(sb.String())
+ if len(pullErrors) != len(r.PullCandidates) {
+ pullErrors = append(slices.Clone(pullErrors),
+ fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
}
+
+ return multierr.Format(fmt.Sprintf("%d errors occurred while pulling:\n * ", len(pullErrors)), "\n * ", "", pullErrors)
}
// PullCandidate is a resolved name. Once the Value has been used
@@ -211,7 +205,7 @@ func (c *PullCandidate) Record() error {
value := reference.TrimNamed(c.Value)
if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
- return errors.Wrapf(err, "error recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+ return fmt.Errorf("recording short-name alias (%q=%q): %w", c.resolved.userInput, c.Value, err)
}
return nil
}
@@ -257,7 +251,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
// We're good.
default:
- return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+ return nil, fmt.Errorf("unsupported short-name mode (%v)", mode)
}
isShort, shortRef, err := parseUnnormalizedShortName(name)
@@ -265,8 +259,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, err
}
if !isShort { // no short name
- named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed
+ resolved.addCandidate(shortRef)
+ return resolved, nil
+ }
+
+ // Resolve to docker.io only if enforced by the caller (e.g., Podman's
+ // Docker-compatible REST API).
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, fmt.Errorf("cannot normalize input: %q: %w", name, err)
+ }
resolved.addCandidate(named)
+ resolved.rationale = rationaleEnforcedDockerHub
+ resolved.originDescription = "enforced by caller"
return resolved, nil
}
@@ -295,9 +301,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, err
}
}
- // Make sure to add ":latest" if needed
- namedAlias = reference.TagNameOnly(namedAlias)
-
resolved.addCandidate(namedAlias)
resolved.rationale = rationaleAlias
resolved.originDescription = aliasOriginDescription
@@ -314,20 +317,17 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
// Error out if there's no matching alias and no search registries.
if len(unqualifiedSearchRegistries) == 0 {
if usrConfig != "" {
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
}
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
}
resolved.originDescription = usrConfig
for _, reg := range unqualifiedSearchRegistries {
named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
if err != nil {
- return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
}
- // Make sure to add ":latest" if needed
- named = reference.TagNameOnly(named)
-
resolved.addCandidate(named)
}
@@ -343,7 +343,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
}
// If we don't have a TTY, act according to the mode.
- if !terminal.IsTerminal(int(os.Stdout.Fd())) || !terminal.IsTerminal(int(os.Stdin.Fd())) {
+ if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stdin.Fd())) {
switch mode {
case types.ShortNameModePermissive:
// Permissive falls back to using all candidates.
@@ -353,7 +353,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY")
default:
// We should not end up here.
- return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode)
+ return nil, fmt.Errorf("unexpected short-name mode (%v) during resolution", mode)
}
}
@@ -376,7 +376,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
named, err := reference.ParseNormalizedNamed(selection)
if err != nil {
- return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection)
+ return nil, fmt.Errorf("selection %q is not a valid reference: %w", selection, err)
}
resolved.PullCandidates = nil
@@ -391,9 +391,9 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
// not a short name), it is returned as is. In case, it's a short name, the
// returned slice of named references looks as follows:
//
-// 1) If present, the short-name alias
-// 2) "localhost/" as used by many container engines such as Podman and Buildah
-// 3) Unqualified-search registries from the registries.conf files
+// 1. If present, the short-name alias
+// 2. "localhost/" as used by many container engines such as Podman and Buildah
+// 3. Unqualified-search registries from the registries.conf files
//
// Note that tags and digests are stripped from the specified name before
// looking up an alias. Stripped off tags and digests are later on appended to
@@ -412,6 +412,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
var candidates []reference.Named
+ // Complete the candidates with the specified registries.
+ completeCandidates := func(registries []string) ([]reference.Named, error) {
+ for _, reg := range registries {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
+ }
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+ candidates = append(candidates, named)
+ }
+ return candidates, nil
+ }
+
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ return completeCandidates([]string{"docker.io"})
+ }
+
// Strip off the tag to normalize the short name for looking it up in
// the config files.
isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
@@ -434,9 +451,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
return nil, err
}
}
- // Make sure to add ":latest" if needed
- namedAlias = reference.TagNameOnly(namedAlias)
-
+ namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed
candidates = append(candidates, namedAlias)
}
@@ -447,16 +462,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
}
// Note that "localhost" has precedence over the unqualified-search registries.
- for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) {
- named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
- if err != nil {
- return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
- }
- // Make sure to add ":latest" if needed
- named = reference.TagNameOnly(named)
-
- candidates = append(candidates, named)
- }
-
- return candidates, nil
+ return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...))
}
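
The lookup order that ResolveLocally documents above (a matching alias first, then "localhost/", then the unqualified-search registries, each with ":latest" appended when needed) can be sketched at the string level. A reduced approximation; the registry values are illustrative, and the real code re-attaches the user's original tag or digest rather than always using ":latest":

package main

import "fmt"

// candidateOrder sketches the resolution order documented above: an alias
// (if any) first, then "localhost/", then the unqualified-search registries
// from the registries.conf files.
func candidateOrder(short string, alias string, usrs []string) []string {
	var out []string
	if alias != "" {
		out = append(out, alias+":latest")
	}
	// Note that "localhost" has precedence over the unqualified-search registries.
	for _, reg := range append([]string{"localhost"}, usrs...) {
		out = append(out, fmt.Sprintf("%s/%s:latest", reg, short))
	}
	return out
}

func main() {
	// "fedora" with no alias and two illustrative search registries.
	for _, c := range candidateOrder("fedora", "", []string{"registry.fedoraproject.org", "quay.io"}) {
		fmt.Println(c)
	}
}
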
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go
new file mode 100644
index 000000000..07fe50294
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go
@@ -0,0 +1,12 @@
+//go:build !freebsd
+// +build !freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
+const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go
new file mode 100644
index 000000000..741b99f8f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go
@@ -0,0 +1,12 @@
+//go:build freebsd
+// +build freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
+const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d"
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
index 784a616dc..71f5bc837 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
@@ -1,17 +1,21 @@
package sysregistriesv2
import (
+ "fmt"
+ "maps"
"os"
"path/filepath"
+ "reflect"
"strings"
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@@ -49,6 +53,17 @@ type shortNameAliasConf struct {
// reference counter parts.
// Note that Aliases is niled after being loaded from a file.
Aliases map[string]string `toml:"aliases"`
+
+ // If you add any field, make sure to update nonempty() below.
+}
+
+// nonempty returns true if config contains at least one configuration entry.
+func (c *shortNameAliasConf) nonempty() bool {
+ copy := *c // A shallow copy
+ if copy.Aliases != nil && len(copy.Aliases) == 0 {
+ copy.Aliases = nil
+ }
+ return !reflect.DeepEqual(copy, shortNameAliasConf{})
}
// alias combines the parsed value of an alias with the config file it has been
@@ -153,7 +168,7 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
} else {
// If the name does not exist, throw an error.
if _, exists := conf.Aliases[name]; !exists {
- return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
+ return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
}
delete(conf.Aliases, name)
@@ -197,25 +212,25 @@ func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
func parseShortNameValue(alias string) (reference.Named, error) {
ref, err := reference.Parse(alias)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing alias %q", alias)
+ return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
}
if _, ok := ref.(reference.Digested); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
}
if _, ok := ref.(reference.Tagged); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
}
named, ok := ref.(reference.Named)
if !ok {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
registry := reference.Domain(named)
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
// A final parse to make sure that docker.io references are correctly
@@ -229,25 +244,25 @@ func parseShortNameValue(alias string) (reference.Named, error) {
func validateShortName(name string) error {
repo, err := reference.Parse(name)
if err != nil {
- return errors.Wrapf(err, "cannot parse short name: %q", name)
+ return fmt.Errorf("cannot parse short name: %q: %w", name, err)
}
if _, ok := repo.(reference.Digested); ok {
- return errors.Errorf("invalid short name %q: must not contain digest", name)
+ return fmt.Errorf("invalid short name %q: must not contain digest", name)
}
if _, ok := repo.(reference.Tagged); ok {
- return errors.Errorf("invalid short name %q: must not contain tag", name)
+ return fmt.Errorf("invalid short name %q: must not contain tag", name)
}
named, ok := repo.(reference.Named)
if !ok {
- return errors.Errorf("invalid short name %q: no name", name)
+ return fmt.Errorf("invalid short name %q: no name", name)
}
registry := reference.Domain(named)
if strings.ContainsAny(registry, ".:") || registry == "localhost" {
- return errors.Errorf("invalid short name %q: must not contain registry", name)
+ return fmt.Errorf("invalid short name %q: must not contain registry", name)
}
return nil
}
@@ -283,11 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
}
}
if len(errs) > 0 {
- err := errs[0]
- for i := 1; i < len(errs); i++ {
- err = errors.Wrapf(err, "%v\n", errs[i])
- }
- return nil, err
+ return nil, multierr.Format("", "\n", "", errs)
}
return &res, nil
}
@@ -295,31 +306,32 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
- for name, value := range updates.namedAliases {
- c.namedAliases[name] = value
- }
+ maps.Copy(c.namedAliases, updates.namedAliases)
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
conf := shortNameAliasConf{}
- _, err := toml.DecodeFile(confPath, &conf)
+ meta, err := toml.DecodeFile(confPath, &conf)
if err != nil && !os.IsNotExist(err) {
// It's okay if the config doesn't exist. Other errors are not.
- return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
+ }
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
}
// Even if we don’t always need the cache, creating it validates the machine-generated config. The
// file could still be corrupted by another process or user.
cache, err := newShortNameAliasCache(confPath, &conf)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
return &conf, cache, nil
}
-func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) {
+func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) {
shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
if err != nil {
return "", nil, err
@@ -330,6 +342,6 @@ func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile
}
lockPath := shortNameAliasesConfPath + ".lock"
- locker, err := lockfile.GetLockfile(lockPath)
+ locker, err := lockfile.GetLockFile(lockPath)
return shortNameAliasesConfPath, locker, err
}
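
The nonempty() helper above works around a TOML-decoding subtlety: a field missing from the file is left untouched (nil), while an explicitly empty one decodes as a non-nil empty value, so emptiness checks must normalize before comparing. A minimal sketch of the same normalize-then-reflect.DeepEqual pattern, assuming the vendored BurntSushi/toml decoder behaves as described:

package main

import (
	"fmt"
	"reflect"

	"github.com/BurntSushi/toml"
)

// conf mimics the shortNameAliasConf pattern above: a missing [aliases]
// table stays nil, while an empty one decodes as a non-nil empty map.
type conf struct {
	Aliases map[string]string `toml:"aliases"`
}

// nonempty normalizes empty-but-non-nil fields to nil before comparing
// against the zero value, exactly as the vendored helper does.
func (c *conf) nonempty() bool {
	copy := *c // A shallow copy
	if copy.Aliases != nil && len(copy.Aliases) == 0 {
		copy.Aliases = nil
	}
	return !reflect.DeepEqual(copy, conf{})
}

func main() {
	for _, doc := range []string{
		"",                             // field missing: Aliases stays nil
		"[aliases]",                    // empty table: non-nil, but still "empty"
		"[aliases]\nfoo = \"quay.io/foo\"", // one alias: genuinely nonempty
	} {
		var c conf
		if _, err := toml.Decode(doc, &c); err != nil {
			panic(err)
		}
		fmt.Printf("%-34q nil=%-5v nonempty=%v\n", doc, c.Aliases == nil, c.nonempty())
	}
}
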
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 880f8c871..1b161474d 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -2,9 +2,10 @@ package sysregistriesv2
import (
"fmt"
+ "io/fs"
"os"
"path/filepath"
- "regexp"
+ "reflect"
"sort"
"strings"
"sync"
@@ -12,9 +13,11 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
)
// systemRegistriesConfPath is the path to the system-wide registry
@@ -23,25 +26,27 @@ import (
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath
-// builtinRegistriesConfPath is the path to the registry configuration file.
-// DO NOT change this, instead see systemRegistriesConfPath above.
-const builtinRegistriesConfPath = "/etc/containers/registries.conf"
-
// systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
-// builtinRegistriesConfDirPath is the path to the registry configuration directory.
-// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
-const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
-
// AuthenticationFileHelper is a special key for credential helpers indicating
// the usage of consulting containers-auth.json files instead of a credential
// helper.
const AuthenticationFileHelper = "containers-auth.json"
+const (
+ // configuration values for "pull-from-mirror"
+ // mirrors will be used for both digest pulls and tag pulls
+ MirrorAll = "all"
+ // mirrors will only be used for digest pulls
+ MirrorByDigestOnly = "digest-only"
+ // mirrors will only be used for tag pulls
+ MirrorByTagOnly = "tag-only"
+)
+
// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location. Can be empty iff Prefix contains
@@ -52,6 +57,18 @@ type Endpoint struct {
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
Insecure bool `toml:"insecure,omitempty"`
+ // PullFromMirror is used for adding restrictions to image pull through the mirror.
+ // Set to "all", "digest-only", or "tag-only".
+ // If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+ // tag can potentially yield different images, depending on which endpoint
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
+ // If "tag-only", mirrors will only be used for tag pulls. For a more up-to-date and expensive mirror
+ // that it is less likely to be out of sync if tags move, it should not be unnecessarily
+ // used for digest references.
+ // Default is "all" (or left empty), mirrors will be used for both digest pulls and tag pulls unless the mirror-by-digest-only is set for the primary registry.
+ // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+ // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+ PullFromMirror string `toml:"pull-from-mirror,omitempty"`
}
// userRegistriesFile is the path to the per user registry configuration file.
@@ -79,7 +96,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
// be dropped.
// https://github.com/containers/image/pull/1191#discussion_r610621608
if e.Location == "" {
- if prefix[:2] != "*." {
+ if !strings.HasPrefix(prefix, "*.") {
return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
}
return ref, nil
@@ -87,7 +104,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
newNamedRef = e.Location + refString[prefixLen:]
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
- return nil, errors.Wrapf(err, "error rewriting reference")
+ return nil, fmt.Errorf("rewriting reference: %w", err)
}
return newParsedRef, nil
@@ -114,7 +131,7 @@ type Registry struct {
Blocked bool `toml:"blocked,omitempty"`
// If true, mirrors will only be used for digest pulls. Pulling images by
// tag can potentially yield different images, depending on which endpoint
- // we pull from. Forcing digest-pulls for mirrors avoids that issue.
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
}
@@ -129,17 +146,29 @@ type PullSource struct {
// reference.
func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
var endpoints []Endpoint
-
+ _, isDigested := ref.(reference.Canonical)
if r.MirrorByDigestOnly {
- // Only use mirrors when the reference is a digest one.
- if _, isDigested := ref.(reference.Canonical); isDigested {
- endpoints = append(r.Mirrors, r.Endpoint)
- } else {
- endpoints = []Endpoint{r.Endpoint}
+ // Only use mirrors when the reference is a digested one.
+ if isDigested {
+ endpoints = append(endpoints, r.Mirrors...)
}
} else {
- endpoints = append(r.Mirrors, r.Endpoint)
+ for _, mirror := range r.Mirrors {
+ // skip the mirror if per mirror setting exists but reference does not match the restriction
+ switch mirror.PullFromMirror {
+ case MirrorByDigestOnly:
+ if !isDigested {
+ continue
+ }
+ case MirrorByTagOnly:
+ if isDigested {
+ continue
+ }
+ }
+ endpoints = append(endpoints, mirror)
+ }
}
+ endpoints = append(endpoints, r.Endpoint)
sources := []PullSource{}
for _, ep := range endpoints {
@@ -171,10 +200,27 @@ type V1RegistriesConf struct {
}
// Nonempty returns true if config contains at least one configuration entry.
+// Empty arrays are treated as missing entries.
func (config *V1RegistriesConf) Nonempty() bool {
- return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
- len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
- len(config.V1TOMLConfig.Block.Registries) != 0)
+ copy := *config // A shallow copy
+ if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
+ copy.V1TOMLConfig.Search.Registries = nil
+ }
+ if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 {
+ copy.V1TOMLConfig.Insecure.Registries = nil
+ }
+ if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
+ copy.V1TOMLConfig.Block.Registries = nil
+ }
+ return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case it remains nil), while an empty array ([]) is unmarshaled
+// as a non-nil []string{}.
+func (config *V1RegistriesConf) hasSetField() bool {
+ return !reflect.DeepEqual(*config, V1RegistriesConf{})
}
// V2RegistriesConf is the sysregistries v2 configuration format.
@@ -202,13 +248,40 @@ type V2RegistriesConf struct {
// potentially use all unqualified-search registries
ShortNameMode string `toml:"short-name-mode"`
+ // AdditionalLayerStoreAuthHelper is a helper binary that receives
+ // registry credentials and passes them to the Additional Layer Store for
+ // registry authentication. These credentials are only collected when pulling (not pushing).
+ AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
+
shortNameAliasConf
+
+ // If you add any field, make sure to update Nonempty() below.
}
// Nonempty returns true if config contains at least one configuration entry.
func (config *V2RegistriesConf) Nonempty() bool {
- return (len(config.Registries) != 0 ||
- len(config.UnqualifiedSearchRegistries) != 0)
+ copy := *config // A shallow copy
+ if copy.Registries != nil && len(copy.Registries) == 0 {
+ copy.Registries = nil
+ }
+ if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
+ copy.UnqualifiedSearchRegistries = nil
+ }
+ if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
+ copy.CredentialHelpers = nil
+ }
+ if !copy.shortNameAliasConf.nonempty() {
+ copy.shortNameAliasConf = shortNameAliasConf{}
+ }
+ return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case it remains nil), while an empty array ([]) is unmarshaled
+// as a non-nil []string{}.
+func (config *V2RegistriesConf) hasSetField() bool {
+ return !reflect.DeepEqual(*config, V2RegistriesConf{})
}
// parsedConfig is the result of parsing, and possibly merging, configuration files;
@@ -318,7 +391,7 @@ func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
}
// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
-var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
// postProcess checks the consistency of all the configuration, looks for conflicts,
// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
@@ -346,11 +419,15 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
}
// FIXME: allow config authors to always use Prefix.
// https://github.com/containers/image/pull/1191#discussion_r610622495
- if reg.Prefix[:2] != "*." && reg.Location == "" {
+ if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
}
}
+ // validate that mirror usage settings are not applied to the primary registry
+ if reg.PullFromMirror != "" {
+ return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+ }
// make sure mirrors are valid
for _, mir := range reg.Mirrors {
mir.Location, err = parseLocation(mir.Location)
@@ -364,6 +441,14 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
if mir.Location == "" {
return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
}
+
+ if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+ return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
+ }
+ if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+ mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+ return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+ }
}
if reg.Location == "" {
regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
@@ -485,7 +570,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
- } else if _, err := os.Stat(userRegistriesFilePath); err == nil {
+ } else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath
@@ -574,17 +659,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
dirPaths = append(dirPaths, wrapper.userConfigDirPath)
}
for _, dirPath := range dirPaths {
- err := filepath.Walk(dirPath,
+ err := filepath.WalkDir(dirPath,
// WalkFunc to read additional configs
- func(path string, info os.FileInfo, err error) error {
+ func(path string, d fs.DirEntry, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
- case info == nil:
+ case d == nil:
// this should only happen when err != nil but let's be sure
return nil
- case info.IsDir():
+ case d.IsDir():
if path != dirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
@@ -604,7 +689,7 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
- return nil, errors.Wrapf(err, "error reading registries.conf.d")
+ return nil, fmt.Errorf("reading registries.conf.d: %w", err)
}
}
@@ -646,7 +731,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
return nil, err // Should never happen
}
} else {
- return nil, errors.Wrapf(err, "error loading registries configuration %q", wrapper.configPath)
+ return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err)
}
}
@@ -659,7 +744,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
// Enforce v2 format for drop-in-configs.
dropIn, err := loadConfigFile(path, true)
if err != nil {
- return nil, errors.Wrapf(err, "error loading drop-in registries configuration %q", path)
+ return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
}
config.updateWithConfigurationFrom(dropIn)
}
@@ -720,7 +805,7 @@ func parseShortNameMode(mode string) (types.ShortNameMode, error) {
case "permissive":
return types.ShortNameModePermissive, nil
default:
- return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode)
+ return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
}
}
@@ -745,6 +830,16 @@ func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
return config.partialV2.CredentialHelpers, nil
}
+// AdditionalLayerStoreAuthHelper returns the helper for passing registry
+// credentials to Additional Layer Store.
+func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
+ config, err := getConfig(sys)
+ if err != nil {
+ return "", err
+ }
+ return config.partialV2.AdditionalLayerStoreAuthHelper, nil
+}
+
// refMatchingSubdomainPrefix returns the length of ref
// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
@@ -781,7 +876,7 @@ func refMatchingSubdomainPrefix(ref, prefix string) int {
// (This is split from the caller primarily to make testing easier.)
func refMatchingPrefix(ref, prefix string) int {
switch {
- case prefix[0:2] == "*.":
+ case strings.HasPrefix(prefix, "*."):
return refMatchingSubdomainPrefix(ref, prefix)
case len(ref) < len(prefix):
return -1
@@ -854,20 +949,23 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
var combinedTOML tomlConfig
- _, err := toml.DecodeFile(path, &combinedTOML)
+ meta, err := toml.DecodeFile(path, &combinedTOML)
if err != nil {
return nil, err
}
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+ }
- if combinedTOML.V1RegistriesConf.Nonempty() {
+ if combinedTOML.V1RegistriesConf.hasSetField() {
// Enforce the v2 format if requested.
if forceV2 {
return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
}
// Convert a v1 config into a v2 config.
- if combinedTOML.V2RegistriesConf.Nonempty() {
- return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+ if combinedTOML.V2RegistriesConf.hasSetField() {
+ return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)}
}
converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
if err != nil {
@@ -901,7 +999,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// https://github.com/containers/image/pull/1191#discussion_r610623829
for i := range res.partialV2.Registries {
prefix := res.partialV2.Registries[i].Prefix
- if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") {
+ if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
return nil, &InvalidRegistries{s: msg}
}
@@ -910,7 +1008,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// Parse and validate short-name aliases.
cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
if err != nil {
- return nil, errors.Wrap(err, "error validating short-name aliases")
+ return nil, fmt.Errorf("validating short-name aliases: %w", err)
}
res.aliasCache = cache
// Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and
@@ -938,12 +1036,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
- // the configuration where a non-deterministic order could easily cause
+ // configuration where a non-deterministic order could easily cause
// confusion.
- prefixes := []string{}
- for prefix := range registryMap {
- prefixes = append(prefixes, prefix)
- }
+ prefixes := maps.Keys(registryMap)
sort.Strings(prefixes)
c.partialV2.Registries = []Registry{}
@@ -971,6 +1066,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
c.shortNameMode = updates.shortNameMode
}
+ // == Merge AdditionalLayerStoreAuthHelper:
+ if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
+ c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
+ }
+
// == Merge aliasCache:
// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
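
The pull-from-mirror handling added to PullSourcesFromReference composes two levels of configuration: the registry-wide mirror-by-digest-only flag and the new per-mirror setting. A reduced sketch of the endpoint-selection loop; the types and locations are illustrative stand-ins for Endpoint and Registry, and the real code additionally rewrites references per endpoint:

package main

import "fmt"

// endpoint reduces the vendored Endpoint to the fields the selection needs.
type endpoint struct {
	location       string
	pullFromMirror string // "", "all", "digest-only", or "tag-only"
}

// pullOrder mirrors the selection logic above: mirrors are filtered by the
// registry-wide flag or their per-mirror setting, and the primary endpoint
// is always appended last.
func pullOrder(mirrors []endpoint, primary endpoint, mirrorByDigestOnly, isDigested bool) []string {
	var out []string
	if mirrorByDigestOnly {
		// Only use mirrors when the reference is a digested one.
		if isDigested {
			for _, m := range mirrors {
				out = append(out, m.location)
			}
		}
	} else {
		for _, m := range mirrors {
			// Skip the mirror if a per-mirror setting exists but the
			// reference does not match the restriction.
			switch m.pullFromMirror {
			case "digest-only":
				if !isDigested {
					continue
				}
			case "tag-only":
				if isDigested {
					continue
				}
			}
			out = append(out, m.location)
		}
	}
	return append(out, primary.location)
}

func main() {
	mirrors := []endpoint{
		{location: "mirror-a.example.com", pullFromMirror: "digest-only"},
		{location: "mirror-b.example.com", pullFromMirror: "tag-only"},
	}
	primary := endpoint{location: "registry.example.com"}
	fmt.Println("tag pull:   ", pullOrder(mirrors, primary, false, false))
	fmt.Println("digest pull:", pullOrder(mirrors, primary, false, true))
}
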
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 7e2142b1f..f6c0576e0 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -2,24 +2,23 @@ package tlsclientconfig
import (
"crypto/tls"
- "io/ioutil"
+ "crypto/x509"
+ "fmt"
"net"
"net/http"
"os"
"path/filepath"
+ "slices"
"strings"
"time"
- "github.com/docker/go-connections/sockets"
- "github.com/docker/go-connections/tlsconfig"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
func SetupCertificates(dir string, tlsc *tls.Config) error {
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
- fs, err := ioutil.ReadDir(dir)
+ fs, err := os.ReadDir(dir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
fullPath := filepath.Join(dir, f.Name())
if strings.HasSuffix(f.Name(), ".crt") {
logrus.Debugf(" crt: %s", fullPath)
- data, err := ioutil.ReadFile(fullPath)
+ data, err := os.ReadFile(fullPath)
if err != nil {
if os.IsNotExist(err) {
// Dangling symbolic link?
@@ -48,46 +47,43 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
return err
}
if tlsc.RootCAs == nil {
- systemPool, err := tlsconfig.SystemCertPool()
+ systemPool, err := x509.SystemCertPool()
if err != nil {
- return errors.Wrap(err, "unable to get system cert pool")
+ return fmt.Errorf("unable to get system cert pool: %w", err)
}
tlsc.RootCAs = systemPool
}
tlsc.RootCAs.AppendCertsFromPEM(data)
}
- if strings.HasSuffix(f.Name(), ".cert") {
+ if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok {
certName := f.Name()
- keyName := certName[:len(certName)-5] + ".key"
+ keyName := base + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
- return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
if err != nil {
return err
}
- tlsc.Certificates = append(tlsc.Certificates, cert)
+ tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
- if strings.HasSuffix(f.Name(), ".key") {
+ if base, ok := strings.CutSuffix(f.Name(), ".key"); ok {
keyName := f.Name()
- certName := keyName[:len(keyName)-4] + ".cert"
+ certName := base + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
- return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
}
}
}
return nil
}
-func hasFile(files []os.FileInfo, name string) bool {
- for _, f := range files {
- if f.Name() == name {
- return true
- }
- }
- return false
+func hasFile(files []os.DirEntry, name string) bool {
+ return slices.ContainsFunc(files, func(f os.DirEntry) bool {
+ return f.Name() == name
+ })
}
// NewTransport Creates a default transport
@@ -95,17 +91,13 @@ func NewTransport() *http.Transport {
direct := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
- DualStack: true,
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: direct.DialContext,
TLSHandshakeTimeout: 10 * time.Second,
- // TODO(dmcgowan): Call close idle connections when complete and use keep alive
- DisableKeepAlives: true,
- }
- if _, err := sockets.DialerFromEnvironment(direct); err != nil {
- logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
+ IdleConnTimeout: 90 * time.Second,
+ MaxIdleConns: 100,
}
return tr
}
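
SetupCertificates above keys everything off file suffixes: .crt files are appended to the CA pool, while each name.cert must be paired with a sibling name.key (and vice versa). A minimal sketch of just the suffix pairing, with illustrative file names:

package main

import (
	"fmt"
	"strings"
)

// pairKey shows the strings.CutSuffix handling used above: for every
// "name.cert" the loader expects a sibling "name.key", and CA certificates
// must use the ".crt" extension instead.
func pairKey(certFile string) (string, bool) {
	base, ok := strings.CutSuffix(certFile, ".cert")
	if !ok {
		return "", false
	}
	return base + ".key", true
}

func main() {
	for _, f := range []string{"client.cert", "ca.crt"} {
		if key, ok := pairKey(f); ok {
			fmt.Printf("%s -> expects %s\n", f, key)
		} else {
			fmt.Printf("%s -> not a client certificate\n", f)
		}
	}
}
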
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
new file mode 100644
index 000000000..70758ad43
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -0,0 +1,210 @@
+package sif
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
+func parseDefFile(reader io.Reader) ([]string, []string, error) {
+ type parserState int
+ const (
+ parsingOther parserState = iota
+ parsingEnvironment
+ parsingRunscript
+ )
+
+ environment := []string{}
+ runscript := []string{}
+
+ state := parsingOther
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ switch {
+ case s == `%environment`:
+ state = parsingEnvironment
+ case s == `%runscript`:
+ state = parsingRunscript
+ case strings.HasPrefix(s, "%"):
+ state = parsingOther
+ case state == parsingEnvironment:
+ if s != "" && !strings.HasPrefix(s, "#") {
+ environment = append(environment, s)
+ }
+ case state == parsingRunscript:
+ runscript = append(runscript, s)
+ default: // parsingOther: ignore the line
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
+ }
+ return environment, runscript, nil
+}
+
+// generateInjectedScript generates a shell script based on
+// SIF definition file %environment and %runscript data, and returns it.
+func generateInjectedScript(environment []string, runscript []string) []byte {
+ script := fmt.Sprintf("#!/bin/bash\n"+
+ "%s\n"+
+ "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
+ return []byte(script)
+}
+
+// processDefFile finds sif.DataDeffile in sifImage, if any,
+// and returns:
+// - the command to run
+// - contents of a script to inject as injectedScriptTargetPath, or nil
+func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
+ var environment, runscript []string
+
+ desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
+ if err == nil {
+ environment, runscript, err = parseDefFile(desc.GetReader())
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ var command string
+ var injectedScript []byte
+ if len(environment) == 0 && len(runscript) == 0 {
+ command = "bash"
+ injectedScript = nil
+ } else {
+ injectedScript = generateInjectedScript(environment, runscript)
+ command = injectedScriptTargetPath
+ }
+
+ return command, injectedScript, nil
+}
+
+func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
+ if injectedScript == nil {
+ return nil
+ }
+ filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
+ parentDirPath := filepath.Dir(filePath)
+ if err := os.MkdirAll(parentDirPath, 0755); err != nil {
+ return fmt.Errorf("creating %s: %w", parentDirPath, err)
+ }
+ if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
+ return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
+ }
+ return nil
+}
+
+// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
+// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
+// if necessary.
+func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ defer os.RemoveAll(extractedRootPath)
+
+ // Almost everything in extractedRootPath comes from squashFSPath.
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
+ extractedRootPath, squashFSPath, extractedRootPath, tarPath)
+ script := "#!/bin/sh\n" + conversionCommand + "\n"
+ if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+ return err
+ }
+ defer os.Remove(scriptPath)
+
+ // On top of squashFSPath, we only add injectedScript, if necessary.
+ if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
+ cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("converting image: %w, output: %s", err, string(output))
+ }
+ logrus.Debugf("... finished converting squashfs to tar")
+ return nil
+}
+
+// convertSIFToElements processes sifImage and creates/returns
+// the relevant elements for constructing an OCI-like image:
+// - A path to a tar file containing a root filesystem,
+// - A command to run.
+// The returned tar file path is inside tempDir, which can be assumed to be empty
+// at start, and is exclusively used by the current process (i.e. it is safe
+// to use hard-coded relative paths within it).
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
+ // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
+ // so we can just hard-code a set of unique values here.
+ // We create and/or manage cleanup of these two paths.
+ squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
+ tarPath := filepath.Join(tempDir, "rootfs.tar")
+ // We only allocate these paths; the user is responsible for cleaning them up.
+ extractedRootPath := filepath.Join(tempDir, "rootfs")
+ scriptPath := filepath.Join(tempDir, "script")
+
+ succeeded := false
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
+ // to run both creation and consumption of extractedRootPath in the same fakeroot context.
+ // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
+ // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
+ // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser
+ // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
+ defer os.Remove(squashFSPath)
+ defer func() {
+ if !succeeded {
+ os.Remove(tarPath)
+ }
+ }()
+
+ command, injectedScript, err := processDefFile(sifImage)
+ if err != nil {
+ return "", nil, err
+ }
+
+ rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
+ if err != nil {
+ return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
+ }
+ // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
+ // has an -o option that allows extracting a squashfs from the SIF file directly,
+ // but that version is not currently available in RHEL 8.
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+ if err := func() error { // A scope for defer
+ f, err := os.Create(squashFSPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ return "", nil, err
+ }
+ logrus.Debugf("... finished creating a temporary squashfs image")
+
+ if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+ return "", nil, err
+ }
+ succeeded = true
+ return tarPath, []string{command}, nil
+}
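
The %environment/%runscript extraction above is a small line-oriented state machine and can be exercised standalone. A sketch with an illustrative definition file (parseDefFile mirrors the vendored function, minus the error wrapping):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// parseDefFile keeps only the non-trivial lines of the %environment and
// %runscript sections; any other %section switches back to ignoring lines.
func parseDefFile(reader io.Reader) ([]string, []string, error) {
	type parserState int
	const (
		parsingOther parserState = iota
		parsingEnvironment
		parsingRunscript
	)
	environment := []string{}
	runscript := []string{}
	state := parsingOther
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		s := strings.TrimSpace(scanner.Text())
		switch {
		case s == `%environment`:
			state = parsingEnvironment
		case s == `%runscript`:
			state = parsingRunscript
		case strings.HasPrefix(s, "%"):
			state = parsingOther
		case state == parsingEnvironment:
			if s != "" && !strings.HasPrefix(s, "#") {
				environment = append(environment, s)
			}
		case state == parsingRunscript:
			runscript = append(runscript, s)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, nil, err
	}
	return environment, runscript, nil
}

func main() {
	def := "Bootstrap: library\nFrom: alpine\n\n" +
		"%environment\n    export GREETING=hello\n\n" +
		"%runscript\n    echo \"$GREETING from SIF\"\n"
	env, run, err := parseDefFile(strings.NewReader(def))
	fmt.Println(env, run, err)
}
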
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 000000000..f8bf31034
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,206 @@
+package sif
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecs "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ ref sifReference
+ workDir string
+ layerDigest digest.Digest
+ layerSize int64
+ layerFile string
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+ }
+ defer f.Close()
+
+ // TODO: Instead of writing the tar file to disk, and reading
+ // it here again, stream the tar file to a pipe and
+ // compute the digest while writing it to disk.
+ logrus.Debugf("Computing a digest of the SIF conversion output...")
+ digester := digest.Canonical.Digester()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(digester.Hash(), f)
+ if err != nil {
+ return "", -1, fmt.Errorf("reading %q: %w", path, err)
+ }
+ digest := digester.Digest()
+ logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+ return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from a SIF file.
+// newImageSource extracts SIF objects and saves them in a temp directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) {
+ sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
+ if err != nil {
+ return nil, fmt.Errorf("loading SIF file: %w", err)
+ }
+ defer func() {
+ _ = sifImg.UnloadContainer()
+ }()
+
+ workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif")
+ if err != nil {
+ return nil, fmt.Errorf("creating temp directory: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.RemoveAll(workDir)
+ }
+ }()
+
+ layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
+ if err != nil {
+ return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
+ }
+
+ layerDigest, layerSize, err := getBlobInfo(layerPath)
+ if err != nil {
+ return nil, fmt.Errorf("gathering blob information: %w", err)
+ }
+
+ created := sifImg.ModifiedAt()
+ config := imgspecv1.Image{
+ Created: &created,
+ Platform: imgspecv1.Platform{
+ Architecture: sifImg.PrimaryArch(),
+ OS: "linux",
+ },
+ Config: imgspecv1.ImageConfig{
+ Cmd: commandLine,
+ },
+ RootFS: imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{layerDigest},
+ },
+ History: []imgspecv1.History{
+ {
+ Created: &created,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
+ Comment: "imported from SIF, uuid: " + sifImg.ID(),
+ },
+ {
+ Created: &created,
+ CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
+ EmptyLayer: true,
+ },
+ },
+ }
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
+ }
+ configDigest := digest.Canonical.FromBytes(configBytes)
+
+ manifest := imgspecv1.Manifest{
+ Versioned: imgspecs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Config: imgspecv1.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(configBytes)),
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ },
+ Layers: []imgspecv1.Descriptor{{
+ Digest: layerDigest,
+ Size: layerSize,
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ }},
+ }
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
+ }
+
+ succeeded = true
+ s := &sifImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ workDir: workDir,
+ layerDigest: layerDigest,
+ layerSize: layerSize,
+ layerFile: layerPath,
+ config: configBytes,
+ configDigest: configDigest,
+ manifest: manifestBytes,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *sifImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *sifImageSource) Close() error {
+ return os.RemoveAll(s.workDir)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ switch info.Digest {
+ case s.configDigest:
+ return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil
+ case s.layerDigest:
+ reader, err := os.Open(s.layerFile)
+ if err != nil {
+ return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
+ }
+ return reader, s.layerSize, nil
+ default:
+ return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
+ }
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.New("manifest lists are not supported by the sif transport")
+ }
+ return s.manifest, imgspecv1.MediaTypeImageManifest, nil
+}
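
End to end, callers normally reach this code through the transport API rather than constructing sifImageSource directly. A hedged usage sketch against the public package (the file path is made up):

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/sif"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	ref, err := sif.NewReference("/tmp/app.sif") // hypothetical path
	if err != nil {
		panic(err)
	}
	// The SquashFS-to-tar conversion happens here, in newImageSource.
	src, err := ref.NewImageSource(ctx, &types.SystemContext{})
	if err != nil {
		panic(err)
	}
	defer src.Close() // removes the temporary conversion directory

	manifestBytes, mimeType, err := src.GetManifest(ctx, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s manifest, %d bytes\n", mimeType, len(manifestBytes))
}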
diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go
new file mode 100644
index 000000000..4c0901071
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/transport.go
@@ -0,0 +1,160 @@
+package sif
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for SIF images.
+var Transport = sifTransport{}
+
+type sifTransport struct{}
+
+func (t sifTransport) Name() string {
+ return "sif"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// sifReference is an ImageReference for SIF images.
+type sifReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ file string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no sif.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// sifTransport.ParseReference; callers who intentionally deal with SIF files
+// can use sif.NewReference.
+
+// NewReference returns an image file reference for a specified path.
+func NewReference(file string) (types.ImageReference, error) {
+ // We do not expose an API supplying the resolvedFile; we could, but recomputing it
+ // is generally cheap enough that we prefer being confident about the properties of resolvedFile.
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+ return sifReference{file: file, resolvedFile: resolved}, nil
+}
+
+func (ref sifReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref sifReference) StringWithinTransport() string {
+ return ref.file
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref sifReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref sifReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref sifReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by sifTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for sif: images")
+}
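
For intuition, the namespace loop above strips one trailing path component per iteration and stops before it would emit "/". A standalone sketch of the same walk over an illustrative path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	path := "/var/lib/images/app.sif" // example resolvedFile
	res := []string{}
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || lastSlash == 0 {
			break
		}
		path = path[:lastSlash]
		res = append(res, path)
	}
	// Prints [/var/lib/images /var/lib /var]; "/" and "" are intentionally omitted.
	fmt.Println(res)
}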
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 07fdd42a9..b313231a8 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -3,47 +3,84 @@
package signature
import (
+ "errors"
"fmt"
+ "slices"
+ "strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
)
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+ // Passphrase to use when signing with the key identity.
+ Passphrase string
+}
+
-// SignDockerManifest returns a signature for manifest as the specified dockerReference,
-// using mech and keyIdentity.
-func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return nil, err
}
sig := newUntrustedSignature(manifestDigest, dockerReference)
- return sig.sign(mech, keyIdentity)
+
+ var passphrase string
+ if options != nil {
+ passphrase = options.Passphrase
+ // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return nil, errors.New("invalid passphrase: must not contain a line break")
+ }
+ }
+
+ return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
}
// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+ sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+ return sig, err
+}
+
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+ expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
if err != nil {
- return nil, err
+ return nil, "", err
}
+ var matchedKeyIdentity string
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
- if keyIdentity != expectedKeyIdentity {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+ if !slices.Contains(expectedKeyIdentities, keyIdentity) {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
}
+ matchedKeyIdentity = keyIdentity
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
if err != nil {
- return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
}
if signedRef.String() != expectedRef.String() {
- return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
- signedDockerReference, expectedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
+ signedDockerReference, expectedDockerReference))
}
return nil
},
@@ -53,13 +90,13 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
return err
}
if !matches {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
}
return nil
},
})
if err != nil {
- return nil, err
+ return nil, "", err
}
- return sig, nil
+ return sig, matchedKeyIdentity, nil
}
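
A sketch of how a caller might use the new list-based verification; the file names and key fingerprints below are made up for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	sig, err := os.ReadFile("image.signature") // hypothetical file
	if err != nil {
		panic(err)
	}
	manifest, err := os.ReadFile("manifest.json") // hypothetical file
	if err != nil {
		panic(err)
	}

	// Any of the listed fingerprints is accepted; the matched one is returned.
	verified, keyID, err := signature.VerifyImageManifestSignatureUsingKeyIdentityList(
		sig, manifest, "docker.io/library/busybox:latest", mech,
		[]string{"0123456789ABCDEF", "FEDCBA9876543210"}) // hypothetical fingerprints
	if err != nil {
		panic(err)
	}
	fmt.Printf("manifest %s signed by key %s\n", verified.DockerManifestDigest, keyID)
}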
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
new file mode 100644
index 000000000..31dfdd342
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -0,0 +1,207 @@
+//go:build !containers_image_fulcio_stub
+// +build !containers_image_fulcio_stub
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/sigstore/fulcio/pkg/certificate"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// fulcioTrustRoot contains a policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+ caCertificates *x509.CertPool
+ oidcIssuer string
+ subjectEmail string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+ if f.oidcIssuer == "" {
+ return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
+ }
+ if f.subjectEmail == "" {
+ return errors.New("Internal inconsistency: Fulcio use set up without subject email")
+ }
+ return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+ // == Validate the recorded OIDC issuer
+ gotOIDCIssuer1 := false
+ gotOIDCIssuer2 := false
+ var oidcIssuer1, oidcIssuer2 string
+ // certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
+ // between certificate.OIDIssuer and certificate.OIDIssuerV2.
+ // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
+ // reject duplicates manually.
+ for _, untrustedExt := range untrustedCertificate.Extensions {
+ if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it.
+ if gotOIDCIssuer1 {
+ // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+ // already in ParseCertificate.
+ return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension")
+ }
+ oidcIssuer1 = string(untrustedExt.Value)
+ gotOIDCIssuer1 = true
+ } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) {
+ if gotOIDCIssuer2 {
+ // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+ // already in ParseCertificate.
+ return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension")
+ }
+ rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2)
+ if err != nil {
+ return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err))
+ }
+ if len(rest) != 0 {
+ return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data")
+ }
+ gotOIDCIssuer2 = true
+ }
+ }
+ switch {
+ case gotOIDCIssuer1 && gotOIDCIssuer2:
+ if oidcIssuer1 != oidcIssuer2 {
+ return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v",
+ oidcIssuer1, oidcIssuer2))
+ }
+ return oidcIssuer1, nil
+ case gotOIDCIssuer1:
+ return oidcIssuer1, nil
+ case gotOIDCIssuer2:
+ return oidcIssuer2, nil
+ default:
+ return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
+ }
+}
+
+func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
+ // == Verify the certificate is correctly signed
+ var untrustedIntermediatePool *x509.CertPool // = nil
+ // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar,
+ // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load
+ // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert
+ // for intermediate certificates.
+ if len(untrustedIntermediateChainBytes) > 0 {
+ untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+ if err != nil {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+ }
+ untrustedIntermediatePool = x509.NewCertPool()
+ if len(untrustedIntermediateChain) > 1 {
+ for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+ untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+ }
+ }
+ }
+
+ untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
+ if err != nil {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
+ }
+ switch len(untrustedLeafCerts) {
+ case 0:
+ return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
+ case 1:
+ break // OK
+ default:
+ return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
+ }
+ untrustedCertificate := untrustedLeafCerts[0]
+
+ // Go rejects a Subject Alternative Name extension that has no DNSNames, EmailAddresses, IPAddresses or URIs;
+ // we match the SAN ourselves, so override that.
+ if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+ var remaining []asn1.ObjectIdentifier
+ for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+ if !oid.Equal(cryptoutils.SANOID) {
+ remaining = append(remaining, oid)
+ }
+ }
+ untrustedCertificate.UnhandledCriticalExtensions = remaining
+ }
+
+ if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+ Intermediates: untrustedIntermediatePool,
+ Roots: f.caCertificates,
+ // NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+ // and validates the leaf certificate against relevantTime manually.
+ // We verify the full certificate chain against relevantTime instead.
+ // Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+ CurrentTime: relevantTime,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+ }); err != nil {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("veryfing leaf certificate failed: %v", err))
+ }
+
+ // Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+ //
+ // We don’t currently do that.
+ //
+ // At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+ // already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
+ // certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+ //
+ // Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
+ // better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
+ // log of approved Fulcio invocations, and it’s not clear where that would come from, especially human users manually
+ // logging in using OpenID are not going to maintain a record of those actions.
+ //
+ // Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+ // by correctly-issued certificates.
+ //
+ // So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+ // the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+ // make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+
+ // == Validate the recorded OIDC issuer
+ oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+ if err != nil {
+ return nil, err
+ }
+ if oidcIssuer != f.oidcIssuer {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
+ }
+
+ // == Validate the OIDC subject
+ if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
+ f.subjectEmail,
+ untrustedCertificate.EmailAddresses))
+ }
+ // FIXME: Match more subject types? Cosign does:
+ // - .DNSNames (can’t be issued by Fulcio)
+ // - .IPAddresses (can’t be issued by Fulcio)
+ // - .URIs (CAN be issued by Fulcio)
+ // - OtherName values in SAN (CAN be issued by Fulcio)
+ // - Various values about GitHub workflows (CAN be issued by Fulcio)
+ // What does it… mean to get an OAuth2 identity for an IP address?
+ // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for
+ // issuers and/or subjects and/or combinations? Regexps? More?
+
+ return untrustedCertificate.PublicKey, nil
+}
+
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+ untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
+ untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
+ rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes,
+ untrustedBase64Signature, untrustedPayloadBytes)
+ if err != nil {
+ return nil, err
+ }
+ return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes)
+}
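
The v1/v2 issuer extensions above differ only in encoding: v1 stores the raw issuer bytes, while v2 wraps them in a DER-encoded string, which is why the code needs asn1.Unmarshal plus the trailing-data check. A self-contained round-trip of the v2 form (the issuer value is an example):

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	issuer := "https://accounts.example.com" // example issuer value

	// The issuer v2 extension carries a DER-encoded UTF8String.
	der, err := asn1.MarshalWithParams(issuer, "utf8")
	if err != nil {
		panic(err)
	}

	var decoded string
	rest, err := asn1.Unmarshal(der, &decoded)
	if err != nil {
		panic(err)
	}
	if len(rest) != 0 {
		panic("trailing data after the issuer string")
	}
	fmt.Println(decoded == issuer) // true
}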
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go
new file mode 100644
index 000000000..c0b48dafa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go
@@ -0,0 +1,28 @@
+//go:build containers_image_fulcio_stub
+// +build containers_image_fulcio_stub
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "errors"
+)
+
+type fulcioTrustRoot struct {
+ caCertificates *x509.CertPool
+ oidcIssuer string
+ subjectEmail string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+ return errors.New("fulcio disabled at compile-time")
+}
+
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+ untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
+ untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
+ return nil, errors.New("fulcio disabled at compile-time")
+
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go
new file mode 100644
index 000000000..7872f0f43
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go
@@ -0,0 +1,15 @@
+package internal
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+// This is publicly visible as signature.InvalidSignatureError
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
+func NewInvalidSignatureError(msg string) InvalidSignatureError {
+ return InvalidSignatureError{msg: msg}
+}
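
A small sketch of how this type is matched, written as if it lived alongside errors.go in this package (matchesInvalid is hypothetical); the public alias mentioned in the comment makes the same errors.As pattern work outside the module:

package internal

import "errors"

// matchesInvalid reports whether err (or any error it wraps) is an
// InvalidSignatureError, and returns its message if so.
func matchesInvalid(err error) (string, bool) {
	var invalid InvalidSignatureError
	if errors.As(err, &invalid) {
		return invalid.msg, true
	}
	return "", false
}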
diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go
new file mode 100644
index 000000000..f9efafb8e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/json.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/set"
+)
+
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
+
+func (err JSONFormatError) Error() string {
+ return string(err)
+}
+
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
+ seenKeys := set.New[string]()
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return JSONFormatError(err.Error())
+ }
+ if t != json.Delim('{') {
+ return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
+ }
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ return JSONFormatError(err.Error())
+ }
+ if t == json.Delim('}') {
+ break
+ }
+
+ key, ok := t.(string)
+ if !ok {
+ // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+ return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
+ }
+ if seenKeys.Contains(key) {
+ return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
+ }
+ seenKeys.Add(key)
+
+ valuePtr := fieldResolver(key)
+ if valuePtr == nil {
+ return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
+ }
+ // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+ if err := dec.Decode(valuePtr); err != nil {
+ return JSONFormatError(err.Error())
+ }
+ }
+ if _, err := dec.Token(); err != io.EOF {
+ return JSONFormatError("Unexpected data after JSON object")
+ }
+ return nil
+}
+
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
+ seenKeys := set.New[string]()
+ if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
+ if valuePtr, ok := exactFields[key]; ok {
+ seenKeys.Add(key)
+ return valuePtr
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ for key := range exactFields {
+ if !seenKeys.Contains(key) {
+ return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
+ }
+ }
+ return nil
+}
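
To make the strictness concrete, a sketch exercising the helper from inside this package (exampleParanoid is hypothetical); unknown, duplicate, and missing keys are all hard errors:

package internal

import "fmt"

func exampleParanoid() {
	var name string
	var count int64
	// Accepts exactly {"name": ..., "count": ...}.
	err := ParanoidUnmarshalJSONObjectExactFields([]byte(`{"name":"x","count":3}`), map[string]any{
		"name":  &name,
		"count": &count,
	})
	fmt.Println(err, name, count) // <nil> x 3

	// A key that fieldResolver does not know is rejected outright.
	err = ParanoidUnmarshalJSONObjectExactFields([]byte(`{"name":"x","extra":true}`), map[string]any{
		"name": &name,
	})
	fmt.Println(err) // Unknown key "extra"
}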
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
new file mode 100644
index 000000000..bddaca690
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
@@ -0,0 +1,248 @@
+//go:build !containers_image_rekor_stub
+// +build !containers_image_rekor_stub
+
+package internal
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "time"
+
+ "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
+// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
+const HashedRekordV001APIVersion = "0.0.1"
+
+// UntrustedRekorSET is the parsed content of the sigstore-signature Rekor SET
+// (note that this is a signature-specific format, not a format directly used by the Rekor API).
+// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
+type UntrustedRekorSET struct {
+ UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
+ UntrustedPayload json.RawMessage
+}
+
+type UntrustedRekorPayload struct {
+ Body []byte // In cosign, this is an any, but only a string works
+ IntegratedTime int64
+ LogIndex int64
+ LogID string
+}
+
+// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
+
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ return err
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+ return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
+ return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
+ "Payload": &s.UntrustedPayload,
+ })
+}
+
+// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorSET{}
+var _ json.Marshaler = (*UntrustedRekorSET)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]any{
+ "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
+ "Payload": s.UntrustedPayload,
+ })
+}
+
+// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
+ return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+ return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "body": &p.Body,
+ "integratedTime": &p.IntegratedTime,
+ "logIndex": &p.LogIndex,
+ "logID": &p.LogID,
+ })
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]any{
+ "body": p.Body,
+ "integratedTime": p.IntegratedTime,
+ "logIndex": p.LogIndex,
+ "logID": p.LogID,
+ })
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by one of publicKeys and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+ // FIXME: Should the publicKeys parameter hard-code ecdsa?
+
+ // == Parse SET bytes
+ var untrustedSET UntrustedRekorSET
+ // Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+ if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+ return time.Time{}, NewInvalidSignatureError(err.Error())
+ }
+ // == Verify SET signature
+ // Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+ // assuming jsoncanonicalizer is designed to operate on untrusted data.
+ untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+ }
+ untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+ publicKeyMatched := false
+ for _, pk := range publicKeys {
+ if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+ publicKeyMatched = true
+ break
+ }
+ }
+ if !publicKeyMatched {
+ return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+ }
+
+ // == Parse SET payload
+ // Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+ // to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+ // could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+ // of the SET payload.
+ var rekorPayload UntrustedRekorPayload
+ if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+ }
+ // FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
+ // hashedRekor.Spec and so on.
+ // Especially if we anticipate needing to decode different data formats…
+ // That would also allow being much more strict about JSON.
+ //
+ // Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
+ var hashedRekord models.Hashedrekord
+ if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+ }
+ // The decode of models.HashedRekord validates the "kind": "hashedrecord" field, which is otherwise invisible to us.
+ if hashedRekord.APIVersion == nil {
+ return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+ }
+ if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+ }
+ hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
+ if err != nil {
+ // Coverage: hashedRekord.Spec is an any that was just unmarshaled,
+ // so this should never fail.
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+ }
+ var hashedRekordV001 models.HashedrekordV001Schema
+ if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekod spec: %v", err))
+ }
+
+ // == Match unverifiedKeyOrCertBytes
+ if hashedRekordV001.Signature == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+ }
+ if hashedRekordV001.Signature.PublicKey == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+
+ }
+ rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+ if rekorKeyOrCertPEM == nil {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+ }
+ if len(rest) != 0 {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+ }
+ // FIXME: For public keys, let the caller provide the DER-formatted blob instead
+ // of round-tripping through PEM.
+ unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+ if unverifiedKeyOrCertPEM == nil {
+ return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+ }
+ if len(rest) != 0 {
+ return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+ }
+ // NOTE: This compares the PEM payload, but not the object type or headers.
+ if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+ }
+ // == Match unverifiedSignatureBytes
+ unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+ }
+ if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+ string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+ }
+
+ // == Match unverifiedPayloadBytes
+ if hashedRekordV001.Data == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+ }
+ if hashedRekordV001.Data.Hash == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+ }
+ if hashedRekordV001.Data.Hash.Algorithm == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+ }
+ // FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values.
+ // Eventually we should support them as well.
+ // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
+ // issue.
+ if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+ }
+ if hashedRekordV001.Data.Hash.Value == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+ }
+ rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+
+ }
+ unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+ if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+ return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+ }
+
+ // == All OK; return the relevant time.
+ return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
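
The core of the SET check is: canonicalize the payload (RFC 8785), hash it, and verify an ASN.1-encoded ECDSA signature over the hash. A self-contained toy round-trip of just that step, with a freshly generated key standing in for Rekor's log key:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	payload := []byte(`{"logIndex": 42, "integratedTime": 1700000000, "logID": "abc", "body": ""}`)

	// The SET signs the canonical JSON form, not the bytes as delivered.
	canonical, err := jsoncanonicalizer.Transform(payload)
	if err != nil {
		panic(err)
	}
	hash := sha256.Sum256(canonical)
	set, err := ecdsa.SignASN1(rand.Reader, key, hash[:])
	if err != nil {
		panic(err)
	}

	// This mirrors the verification loop in VerifyRekorSET above.
	fmt.Println(ecdsa.VerifyASN1(&key.PublicKey, hash[:], set)) // true
}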
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
new file mode 100644
index 000000000..7c121cc2e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
@@ -0,0 +1,15 @@
+//go:build containers_image_rekor_stub
+// +build containers_image_rekor_stub
+
+package internal
+
+import (
+ "crypto/ecdsa"
+ "time"
+)
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by one of publicKeys and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+ return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
new file mode 100644
index 000000000..90a81dc1c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -0,0 +1,239 @@
+package internal
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const (
+ sigstoreSignatureType = "cosign container image signature"
+ sigstoreHardcodedHashAlgorithm = crypto.SHA256
+)
+
+// UntrustedSigstorePayload is the parsed content of a sigstore signature payload (not the full signature)
+type UntrustedSigstorePayload struct {
+ untrustedDockerManifestDigest digest.Digest
+ untrustedDockerReference string // FIXME: more precise type?
+ untrustedCreatorID *string
+ // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ untrustedTimestamp *int64
+}
+
+// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
+// the specified primary contents and appropriate metadata.
+func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "containers/image " + version.Version
+ timestamp := time.Now().Unix()
+ return UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: dockerManifestDigest,
+ untrustedDockerReference: dockerReference,
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ }
+}
+
+// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implement json.Marshaler
+var _ json.Marshaler = UntrustedSigstorePayload{}
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+ if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]any{
+ "type": sigstoreSignatureType,
+ "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+ }
+ optional := map[string]any{}
+ if s.untrustedCreatorID != nil {
+ optional["creator"] = *s.untrustedCreatorID
+ }
+ if s.untrustedTimestamp != nil {
+ optional["timestamp"] = *s.untrustedTimestamp
+ }
+ signature := map[string]any{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+ return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ // /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+ if !bytes.Equal(optional, []byte("null")) {
+ if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore any
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ }
+ if gotCreatorID {
+ s.untrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+ return NewInvalidSignatureError("Field optional.timestamp is not an integer")
+ }
+ s.untrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != sigstoreSignatureType {
+ return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ digestValue, err := digest.Parse(digestString)
+ if err != nil {
+ return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+ }
+ s.untrustedDockerManifestDigest = digestValue
+
+ return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+ "docker-reference": &s.untrustedDockerReference,
+ })
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+ ValidateSignedDockerReference func(string) error
+ ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
+// by any of the public keys in publicKeys.
+//
+// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
+// It is INSUFFICIENT alone to consider the signature acceptable.
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
+ if len(publicKeys) == 0 {
+ return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
+ }
+
+ verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
+ for _, key := range publicKeys {
+ // Failing to load a verifier indicates that something is really, really
+ // invalid about the public key; prefer to fail even if the signature might be
+ // valid with other keys, so that users fix their fallback keys before they need them.
+ // For that reason, we even initialize all verifiers before trying to validate the signature
+ // with any key.
+ verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHardcodedHashAlgorithm)
+ if err != nil {
+ return err
+ }
+ verifiers = append(verifiers, verifier)
+ }
+
+ var failures []string
+ for _, verifier := range verifiers {
+ // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+ // which seems to be not used by anything. So we don’t bother.
+ err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload))
+ if err == nil {
+ return nil
+ }
+
+ failures = append(failures, err.Error())
+ }
+
+ if len(failures) == 0 {
+ // Coverage: We have checked there is at least one public key, any success causes an early return,
+ // and any failure adds an entry to failures => there must be at least one error.
+ return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded")
+ }
+ return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", "))
+}
+
+// VerifySigstorePayload verifies that unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal
+// components match the expected values specified by rules; on success it returns the parsed payload.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
+func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
+ unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
+ }
+
+ if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil {
+ return nil, err
+ }
+
+ var unmatchedPayload UntrustedSigstorePayload
+ if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
+ return nil, NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // SigstorePayloadAcceptanceRules have accepted this value.
+ return &unmatchedPayload, nil
+}
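
For reference, the payload shape produced and parsed above is the standard cosign simple-signing JSON. A sketch from inside this package (examplePayloadShape is hypothetical; the digest is the sha256 of the empty string):

package internal

import (
	"encoding/json"
	"fmt"
)

func examplePayloadShape() {
	p := NewUntrustedSigstorePayload(
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"docker.io/library/busybox:latest")
	out, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"critical":{"identity":{"docker-reference":"docker.io/library/busybox:latest"},
	//  "image":{"docker-manifest-digest":"sha256:e3b0…"},
	//  "type":"cosign container image signature"},
	//  "optional":{"creator":"containers/image <version>","timestamp":<now>}}
}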
diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/json.go
deleted file mode 100644
index 9e592863d..000000000
--- a/vendor/github.com/containers/image/v5/signature/json.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package signature
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
-)
-
-// jsonFormatError is returned when JSON does not match expected format.
-type jsonFormatError string
-
-func (err jsonFormatError) Error() string {
- return string(err)
-}
-
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
-// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
-// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
-//
-// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
-// we could use reflection to automate this. Later?
-func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
- seenKeys := map[string]struct{}{}
-
- dec := json.NewDecoder(bytes.NewReader(data))
- t, err := dec.Token()
- if err != nil {
- return jsonFormatError(err.Error())
- }
- if t != json.Delim('{') {
- return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
- }
- for {
- t, err := dec.Token()
- if err != nil {
- return jsonFormatError(err.Error())
- }
- if t == json.Delim('}') {
- break
- }
-
- key, ok := t.(string)
- if !ok {
- // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
- return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
- }
- if _, ok := seenKeys[key]; ok {
- return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
- }
- seenKeys[key] = struct{}{}
-
- valuePtr := fieldResolver(key)
- if valuePtr == nil {
- return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
- }
- // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
- if err := dec.Decode(valuePtr); err != nil {
- return jsonFormatError(err.Error())
- }
- }
- if _, err := dec.Token(); err != io.EOF {
- return jsonFormatError("Unexpected data after JSON object")
- }
- return nil
-}
-
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
-// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
-// must be present exactly once, and none other fields are accepted.
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
- seenKeys := map[string]struct{}{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
- if valuePtr, ok := exactFields[key]; ok {
- seenKeys[key] = struct{}{}
- return valuePtr
- }
- return nil
- }); err != nil {
- return err
- }
- for key := range exactFields {
- if _, ok := seenKeys[key]; !ok {
- return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
- }
- }
- return nil
-}
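
The helpers deleted above were moved to the signature/internal package as ParanoidUnmarshalJSONObject and ParanoidUnmarshalJSONObjectExactFields (see the call sites later in this diff). A minimal sketch of the calling pattern, with hypothetical type and field names:

package internal

import "fmt"

type example struct {
	Type string
	Name string
}

// decodeExample (hypothetical) rejects unknown, duplicate, and missing keys,
// all of which plain json.Unmarshal would silently tolerate.
func decodeExample(data []byte) (*example, error) {
	var tmp example
	if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
		"type": &tmp.Type,
		"name": &tmp.Name,
	}); err != nil {
		return nil, fmt.Errorf("strict decode failed: %w", err)
	}
	return &tmp, nil
}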
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 2c08c231e..1d3fe0fdc 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -6,16 +6,19 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"strings"
- "golang.org/x/crypto/openpgp"
+ // This code is used only to parse the data in an explicitly-untrusted
+ // code path, where cryptography is not relevant. For now, continue to
+ // use this frozen deprecated implementation. When mechanism_openpgp.go
+ // migrates to another implementation, this should migrate as well.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
// Close removes resources associated with the mechanism, if any.
Close() error
@@ -34,6 +37,15 @@ type SigningMechanism interface {
UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
}
+// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
+type signingMechanismWithPassphrase interface {
+ SigningMechanism
+
+ // Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
+}
+
// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
type SigningNotSupportedError string
@@ -53,7 +65,7 @@ func NewGPGSigningMechanism() (SigningMechanism, error) {
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
- return newEphemeralGPGSigningMechanism(blob)
+ return newEphemeralGPGSigningMechanism([][]byte{blob})
}
// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
@@ -70,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
if !md.IsSigned {
return nil, "", errors.New("The input is not a signature")
}
- content, err := ioutil.ReadAll(md.UnverifiedBody)
+ content, err := io.ReadAll(md.UnverifiedBody)
if err != nil {
// Coverage: An error during reading the body can happen only if
// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
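
A minimal sketch of using the ephemeral mechanism to verify a signature against exactly the supplied public-key blob; it assumes Verify is part of the SigningMechanism interface (its declaration is elided from the hunk above), and verifyWithEphemeralKeyring is a hypothetical helper name.

package signature

import "fmt"

// verifyWithEphemeralKeyring (hypothetical) checks unverifiedSignature against
// only the public keys contained in keyBlob, without touching any GPG home.
func verifyWithEphemeralKeyring(keyBlob, unverifiedSignature []byte) ([]byte, error) {
	mech, keyIdentities, err := NewEphemeralGPGSigningMechanism(keyBlob)
	if err != nil {
		return nil, err
	}
	defer mech.Close()
	fmt.Printf("imported key identities: %v\n", keyIdentities)

	contents, keyIdentity, err := mech.Verify(unverifiedSignature)
	if err != nil {
		return nil, err
	}
	fmt.Printf("signature made by %s\n", keyIdentity)
	return contents, nil
}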
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index a0afc34b4..2b2a7ad86 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -1,14 +1,16 @@
+//go:build !containers_image_openpgp
// +build !containers_image_openpgp
package signature
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
- "github.com/mtrmac/gpgme"
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/proglottis/gpgme"
)
// A GPG/OpenPGP signing mechanism, implemented using gpgme.
@@ -19,7 +21,7 @@ type gpgmeSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
ctx, err := newGPGMEContext(optionalDir)
if err != nil {
return nil, err
@@ -31,11 +33,11 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
}
// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
-// recognizes _only_ public keys from the supplied blob, and returns the identities
+// recognizes _only_ public keys from the supplied blobs, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
- dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+ dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
if err != nil {
return nil, nil, err
}
@@ -53,9 +55,13 @@ func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, e
ctx: ctx,
ephemeralDir: dir,
}
- keyIdentities, err := mech.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
removeDir = false
@@ -116,9 +122,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error {
return nil
}
-// Sign creates a (non-detached) signature of input using keyIdentity.
+// Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
return nil, err
@@ -132,12 +138,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte,
if err != nil {
return nil, err
}
+
+ if passphrase != "" {
+ // Callback to write the passphrase to the specified file descriptor.
+ callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
+ if prevWasBad {
+ return errors.New("bad passphrase")
+ }
+ _, err := gpgmeFD.WriteString(passphrase + "\n")
+ return err
+ }
+ if err := m.ctx.SetCallback(callback); err != nil {
+ return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
+ }
+
+ // Loopback mode will use the callback instead of prompting the user.
+ if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
+ return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
+ }
+ }
+
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
return nil, err
}
return sigBuffer.Bytes(), nil
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
signedBuffer := bytes.Buffer{}
@@ -154,13 +186,13 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b
return nil, "", err
}
if len(sigs) != 1 {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs)))
}
sig := sigs[0]
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
// FIXME: Better error reporting eventually
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig))
}
return signedBuffer.Bytes(), sig.Fingerprint, nil
}
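
Since SignWithPassphrase exists only on the package-private signingMechanismWithPassphrase interface, in-package callers reach it through a type assertion; a sketch with a hypothetical helper name:

package signature

import "errors"

// signWithOptionalPassphrase (hypothetical) signs input with keyIdentity,
// routing a non-empty passphrase through the gpgme loopback-pinentry path
// implemented above.
func signWithOptionalPassphrase(mech SigningMechanism, input []byte, keyIdentity, passphrase string) ([]byte, error) {
	if passphrase == "" {
		return mech.Sign(input, keyIdentity)
	}
	m, ok := mech.(signingMechanismWithPassphrase)
	if !ok {
		return nil, errors.New("signing mechanism does not support passphrases")
	}
	return m.SignWithPassphrase(input, keyIdentity, passphrase)
}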
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index a05760284..bd1979aef 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -1,3 +1,4 @@
+//go:build containers_image_openpgp
// +build containers_image_openpgp
package signature
@@ -6,14 +7,23 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"path"
"strings"
"time"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/storage/pkg/homedir"
- "golang.org/x/crypto/openpgp"
+
+ // This is fallback code; the primary recommendation is to use the gpgme mechanism
+ // implementation, which is out-of-process and more appropriate for handling long-term private key material
+ // than any Go implementation.
+ // For this verify-only fallback, we haven't reviewed any of the
+ // existing alternatives to choose a replacement; so, for now, we continue to
+ // use this frozen deprecated implementation.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
@@ -23,7 +33,7 @@ type openpgpSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
@@ -36,7 +46,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
}
}
- pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
if err != nil {
if !os.IsNotExist(err) {
return nil, err
@@ -54,14 +64,19 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
- keyIdentities, err := m.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
+
return m, keyIdentities, nil
}
@@ -103,10 +118,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error {
// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
@@ -116,7 +137,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
if !md.IsSigned {
return nil, "", errors.New("not signed")
}
- content, err := ioutil.ReadAll(md.UnverifiedBody)
+ content, err := io.ReadAll(md.UnverifiedBody)
if err != nil {
// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
// (and possibly also signed, but it _must_ be encrypted) and the signing
@@ -130,19 +151,19 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
}
if md.SignedBy == nil {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId))
}
if md.Signature != nil {
if md.Signature.SigLifetimeSecs != nil {
expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
if time.Now().After(expiry) {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry))
}
}
} else if md.SignatureV3 == nil {
// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
// or sets md.SignatureError.
- return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set")
}
// Uppercase the fingerprint to be compatible with gpgme
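
For orientation: which of the two mechanism files above is compiled is selected by the containers_image_openpgp build tag declared at the top of each; illustrative commands:

// go build ./...                                   (default: gpgme-backed mechanism, requires cgo)
// go build -tags containers_image_openpgp ./...    (pure-Go, verify-only fallback)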
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 82fbb68cb..8de705c22 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -15,17 +15,18 @@ package signature
import (
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
- "regexp"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/regexp"
)
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -33,10 +34,6 @@ import (
// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath
-// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
-// DO NOT change this, instead see systemDefaultPolicyPath above.
-const builtinDefaultPolicyPath = "/etc/containers/policy.json"
-
// userPolicyFile is the path to the per user policy path.
var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
@@ -54,39 +51,50 @@ func (err InvalidPolicyFormatError) Error() string {
// NOTE: When this function returns an error, report it to the user and abort.
// DO NOT hard-code fallback policies in your application.
func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
- return NewPolicyFromFile(defaultPolicyPath(sys))
+ policyPath, err := defaultPolicyPath(sys)
+ if err != nil {
+ return nil, err
+ }
+ return NewPolicyFromFile(policyPath)
}
-// defaultPolicyPath returns a path to the default policy of the system.
-func defaultPolicyPath(sys *types.SystemContext) string {
- return defaultPolicyPathWithHomeDir(sys, homedir.Get())
+// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing.
+func defaultPolicyPath(sys *types.SystemContext) (string, error) {
+ policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath)
+ if err != nil {
+ return "", err
+ }
+ return policyFilePath, nil
}
// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
-// it exists only to allow testing it with an artificial home directory.
-func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+// it exists only to allow testing it with artificial paths.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) {
if sys != nil && sys.SignaturePolicyPath != "" {
- return sys.SignaturePolicyPath
+ return sys.SignaturePolicyPath, nil
}
userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
- if _, err := os.Stat(userPolicyFilePath); err == nil {
- return userPolicyFilePath
+ if err := fileutils.Exists(userPolicyFilePath); err == nil {
+ return userPolicyFilePath, nil
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil
}
- return systemDefaultPolicyPath
+ if err := fileutils.Exists(systemPolicyPath); err == nil {
+ return systemPolicyPath, nil
+ }
+ return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath)
}
// NewPolicyFromFile returns a policy configured in the specified file.
func NewPolicyFromFile(fileName string) (*Policy, error) {
- contents, err := ioutil.ReadFile(fileName)
+ contents, err := os.ReadFile(fileName)
if err != nil {
return nil, err
}
policy, err := NewPolicyFromBytes(contents)
if err != nil {
- return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+ return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
}
return policy, nil
}
@@ -108,7 +116,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "default":
return &p.Default
@@ -139,10 +147,10 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
// transport can be nil
transport := transports.Get(key)
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -185,8 +193,8 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -247,8 +255,10 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
res = &prSignedBy{}
case prTypeSignedBaseLayer:
res = &prSignedBaseLayer{}
+ case prTypeSigstoreSigned:
+ res = &prSigstoreSigned{}
default:
- return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
@@ -273,14 +283,14 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
}
if tmp.Type != prTypeInsecureAcceptAnything {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRInsecureAcceptAnything()
return nil
@@ -303,26 +313,36 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
}
if tmp.Type != prTypeReject {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRReject()
return nil
}
// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType))
+ }
+ keySources := 0
+ if keyPath != "" {
+ keySources++
}
- if len(keyPath) > 0 && len(keyData) > 0 {
- return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ if keyPaths != nil {
+ keySources++
+ }
+ if keyData != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified")
}
if signedIdentity == nil {
return nil, InvalidPolicyFormatError("signedIdentity not specified")
@@ -331,6 +351,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
prCommon: prCommon{Type: prTypeSignedBy},
KeyType: keyType,
KeyPath: keyPath,
+ KeyPaths: keyPaths,
KeyData: keyData,
SignedIdentity: signedIdentity,
}, nil
@@ -338,7 +359,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+ return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity)
}
// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
@@ -346,9 +367,19 @@ func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity Poli
return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
}
+// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type.
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths
+func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity)
+}
+
// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, "", keyData, signedIdentity)
+ return newPRSignedBy(keyType, "", nil, keyData, signedIdentity)
}
// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
@@ -363,9 +394,9 @@ var _ json.Unmarshaler = (*prSignedBy)(nil)
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
*pr = prSignedBy{}
var tmp prSignedBy
- var gotKeyPath, gotKeyData = false, false
+ var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "type":
return &tmp.Type
@@ -374,6 +405,9 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
case "keyData":
gotKeyData = true
return &tmp.KeyData
@@ -387,7 +421,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeSignedBy {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
@@ -402,16 +436,16 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var res *prSignedBy
var err error
switch {
- case gotKeyPath && gotKeyData:
- return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
- case gotKeyPath && !gotKeyData:
+ case gotKeyPath && !gotKeyPaths && !gotKeyData:
res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
- case !gotKeyPath && gotKeyData:
+ case !gotKeyPath && gotKeyPaths && !gotKeyData:
+ res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyPaths && gotKeyData:
res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
- case !gotKeyPath && !gotKeyData:
- return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
- default: // Coverage: This should never happen
- return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ case !gotKeyPath && !gotKeyPaths && !gotKeyData:
+ return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present")
+ default:
+ return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present")
}
if err != nil {
return err
@@ -443,7 +477,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
return err
}
if !sbKeyType(s).IsValid() {
- return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s))
}
*kt = sbKeyType(s)
return nil
@@ -473,7 +507,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@@ -481,7 +515,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeSignedBaseLayer {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
if err != nil {
@@ -517,7 +551,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
case prmTypeRemapIdentity:
res = &prmRemapIdentity{}
default:
- return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
@@ -542,14 +576,14 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchExact {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchExact()
return nil
@@ -572,14 +606,14 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchRepoDigestOrExact {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepoDigestOrExact()
return nil
@@ -602,14 +636,14 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchRepository {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepository()
return nil
@@ -619,10 +653,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
ref, err := reference.ParseNormalizedNamed(dockerReference)
if err != nil {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error()))
}
if reference.IsNameOnly(ref) {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference))
}
return &prmExactReference{
prmCommon: prmCommon{Type: prmTypeExactReference},
@@ -642,7 +676,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@@ -650,7 +684,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeExactReference {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMExactReference(tmp.DockerReference)
@@ -664,7 +698,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
- return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
}
return &prmExactRepository{
prmCommon: prmCommon{Type: prmTypeExactRepository},
@@ -684,7 +718,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@@ -692,7 +726,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeExactRepository {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMExactRepository(tmp.DockerRepository)
@@ -706,12 +740,12 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
// Private objects for validateIdentityRemappingPrefix
var (
// remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
- remapIdentityDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+ remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
// remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
// we need this because reference.NameRegexp accepts short names with docker.io implied.
- remapIdentityDomainPrefixRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "/")
+ remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
- remapIdentityNameRegexp = regexp.MustCompile("^" + reference.NameRegexp.String() + "$")
+ remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
)
// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
@@ -756,7 +790,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,
@@ -765,7 +799,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeRemapIdentity {
- return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
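
The new keyPaths support is reached through NewPRSignedByKeyPaths; a minimal sketch of constructing and serializing such a requirement (SBKeyTypeGPGKeys is assumed to be the package's exported GPG keyType constant, and the paths are placeholders):

package signature

import (
	"encoding/json"
	"fmt"
)

func signedByKeyPathsExample() error { // hypothetical helper
	req, err := NewPRSignedByKeyPaths(SBKeyTypeGPGKeys,
		[]string{"/etc/pki/alice.gpg", "/etc/pki/bob.gpg"},
		NewPRMMatchRepoDigestOrExact())
	if err != nil {
		return err
	}
	data, err := json.Marshal(req)
	if err != nil {
		return err
	}
	// Round-trips through the strict UnmarshalJSON shown above.
	fmt.Printf("%s\n", data)
	return nil
}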
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
new file mode 100644
index 000000000..965901e18
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
@@ -0,0 +1,442 @@
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containers/image/v5/signature/internal"
+)
+
+// PRSigstoreSignedOption is a way to pass values to NewPRSigstoreSigned
+type PRSigstoreSignedOption func(*prSigstoreSigned) error
+
+// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyPath != "" {
+ return InvalidPolicyFormatError(`"keyPath" already specified`)
+ }
+ pr.KeyPath = keyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyPaths != nil {
+ return InvalidPolicyFormatError(`"keyPaths" already specified`)
+ }
+ if len(keyPaths) == 0 {
+ return InvalidPolicyFormatError(`"keyPaths" contains no entries`)
+ }
+ pr.KeyPaths = keyPaths
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyData != nil {
+ return InvalidPolicyFormatError(`"keyData" already specified`)
+ }
+ pr.KeyData = keyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyDatas != nil {
+ return InvalidPolicyFormatError(`"keyDatas" already specified`)
+ }
+ if len(keyDatas) == 0 {
+ return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+ }
+ pr.KeyDatas = keyDatas
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.Fulcio != nil {
+ return InvalidPolicyFormatError(`"fulcio" already specified`)
+ }
+ pr.Fulcio = fulcio
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyPath != "" {
+ return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
+ }
+ pr.RekorPublicKeyPath = rekorPublicKeyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublickeyPaths []string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyPaths != nil {
+ return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`)
+ }
+ if len(rekorPublickeyPaths) == 0 {
+ return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`)
+ }
+ pr.RekorPublicKeyPaths = rekorPublickeyPaths
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyData != nil {
+ return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
+ }
+ pr.RekorPublicKeyData = rekorPublicKeyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublickeyDatas [][]byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyDatas != nil {
+ return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`)
+ }
+ if len(rekorPublickeyDatas) == 0 {
+ return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`)
+ }
+ pr.RekorPublicKeyDatas = rekorPublickeyDatas
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.SignedIdentity != nil {
+ return InvalidPolicyFormatError(`"signedIdentity" already specified`)
+ }
+ pr.SignedIdentity = signedIdentity
+ return nil
+ }
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+ res := prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ }
+ for _, o := range options {
+ if err := o(&res); err != nil {
+ return nil, err
+ }
+ }
+
+ keySources := 0
+ if res.KeyPath != "" {
+ keySources++
+ }
+ if res.KeyPaths != nil {
+ keySources++
+ }
+ if res.KeyData != nil {
+ keySources++
+ }
+ if res.KeyDatas != nil {
+ keySources++
+ }
+ if res.Fulcio != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified")
+ }
+
+ rekorSources := 0
+ if res.RekorPublicKeyPath != "" {
+ rekorSources++
+ }
+ if res.RekorPublicKeyPaths != nil {
+ rekorSources++
+ }
+ if res.RekorPublicKeyData != nil {
+ rekorSources++
+ }
+ if res.RekorPublicKeyDatas != nil {
+ rekorSources++
+ }
+ if rekorSources > 1 {
+ return nil, InvalidPolicyFormatError("at most one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas can be used simultaneously")
+ }
+ if res.Fulcio != nil && rekorSources == 0 {
+ return nil, InvalidPolicyFormatError("At least one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas must be specified if fulcio is used")
+ }
+
+ if res.SignedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+
+ return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+ return newPRSigstoreSigned(options...)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath(keyPath),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyData(keyData),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
+ *pr = prSigstoreSigned{}
+ var tmp prSigstoreSigned
+ var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool
+ var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool
+ var fulcio prSigstoreSignedFulcio
+ var signedIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "keyDatas":
+ gotKeyDatas = true
+ return &tmp.KeyDatas
+ case "fulcio":
+ gotFulcio = true
+ return &fulcio
+ case "rekorPublicKeyPath":
+ gotRekorPublicKeyPath = true
+ return &tmp.RekorPublicKeyPath
+ case "rekorPublicKeyPaths":
+ gotRekorPublicKeyPaths = true
+ return &tmp.RekorPublicKeyPaths
+ case "rekorPublicKeyData":
+ gotRekorPublicKeyData = true
+ return &tmp.RekorPublicKeyData
+ case "rekorPublicKeyDatas":
+ gotRekorPublicKeyDatas = true
+ return &tmp.RekorPublicKeyDatas
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSigstoreSigned {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var opts []PRSigstoreSignedOption
+ if gotKeyPath {
+ opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
+ }
+ if gotKeyPaths {
+ opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths))
+ }
+ if gotKeyData {
+ opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
+ }
+ if gotKeyDatas {
+ opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas))
+ }
+ if gotFulcio {
+ opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
+ }
+ if gotRekorPublicKeyPath {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
+ }
+ if gotRekorPublicKeyPaths {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths))
+ }
+ if gotRekorPublicKeyData {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
+ }
+ if gotRekorPublicKeyDatas {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas))
+ }
+ opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
+
+ res, err := newPRSigstoreSigned(opts...)
+ if err != nil {
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio
+type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
+
+// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.CAPath != "" {
+ return InvalidPolicyFormatError(`"caPath" already specified`)
+ }
+ f.CAPath = caPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.CAData != nil {
+ return InvalidPolicyFormatError(`"caData" already specified`)
+ }
+ f.CAData = caData
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.OIDCIssuer != "" {
+ return InvalidPolicyFormatError(`"oidcIssuer" already specified`)
+ }
+ f.OIDCIssuer = oidcIssuer
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.SubjectEmail != "" {
+ return InvalidPolicyFormatError(`"subjectEmail" already specified`)
+ }
+ f.SubjectEmail = subjectEmail
+ return nil
+ }
+}
+
+// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type
+func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) {
+ res := prSigstoreSignedFulcio{}
+ for _, o := range options {
+ if err := o(&res); err != nil {
+ return nil, err
+ }
+ }
+
+ if res.CAPath != "" && res.CAData != nil {
+ return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously")
+ }
+ if res.CAPath == "" && res.CAData == nil {
+ return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified")
+ }
+ if res.OIDCIssuer == "" {
+ return nil, InvalidPolicyFormatError("oidcIssuer not specified")
+ }
+ if res.SubjectEmail == "" {
+ return nil, InvalidPolicyFormatError("subjectEmail not specified")
+ }
+
+ return &res, nil
+}
+
+// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options.
+func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) {
+ return newPRSigstoreSignedFulcio(options...)
+}
+
+// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil)
+
+func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
+ *f = prSigstoreSignedFulcio{}
+ var tmp prSigstoreSignedFulcio
+ var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "caPath":
+ gotCAPath = true
+ return &tmp.CAPath
+ case "caData":
+ gotCAData = true
+ return &tmp.CAData
+ case "oidcIssuer":
+ gotOIDCIssuer = true
+ return &tmp.OIDCIssuer
+ case "subjectEmail":
+ gotSubjectEmail = true
+ return &tmp.SubjectEmail
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ var opts []PRSigstoreSignedFulcioOption
+ if gotCAPath {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath))
+ }
+ if gotCAData {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData))
+ }
+ if gotOIDCIssuer {
+ opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer))
+ }
+ if gotSubjectEmail {
+ opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail))
+ }
+
+ res, err := newPRSigstoreSignedFulcio(opts...)
+ if err != nil {
+ return err
+ }
+
+ *f = *res
+ return nil
+}
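
Putting the functional options together: a sketch of building a Fulcio-backed sigstoreSigned requirement, which per the validation in newPRSigstoreSigned must also carry Rekor material (the helper name, issuer, e-mail, and PEM inputs are placeholders):

package signature

func sigstoreFulcioExample(fulcioCAPEM, rekorKeyPEM []byte) (PolicyRequirement, error) { // hypothetical helper
	fulcio, err := NewPRSigstoreSignedFulcio(
		PRSigstoreSignedFulcioWithCAData(fulcioCAPEM),
		PRSigstoreSignedFulcioWithOIDCIssuer("https://oidc.example.com"),
		PRSigstoreSignedFulcioWithSubjectEmail("alice@example.com"),
	)
	if err != nil {
		return nil, err
	}
	return NewPRSigstoreSigned(
		PRSigstoreSignedWithFulcio(fulcio),
		PRSigstoreSignedWithRekorPublicKeyData(rekorKeyPEM), // required when fulcio is used
		PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
	)
}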
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index edcbf52f4..ab6b89c26 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -7,9 +7,11 @@ package signature
import (
"context"
+ "fmt"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -44,7 +46,7 @@ type PolicyRequirement interface {
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
- // - sarUnknown if if this PolicyRequirement does not deal with signatures.
+ // - sarUnknown if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,
@@ -55,14 +57,14 @@ type PolicyRequirement interface {
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
- isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+ isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
// isRunningImageAllowed returns true if the requirement allows running an image.
// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
- isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+ isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error)
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
@@ -71,7 +73,7 @@ type PolicyReferenceMatch interface {
// matchesDockerReference decides whether a specific image identity is accepted for an image
// (or, usually, for the image's Reference().DockerReference()). Note that
// image.Reference().DockerReference() may be nil.
- matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+ matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool
}
// PolicyContext encapsulates a policy and possible cached state
@@ -92,10 +94,10 @@ const (
pcDestroyed policyContextState = "Destroyed"
)
-// changeContextState changes pc.state, or fails if the state is unexpected
+// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
- return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state)
}
pc.state = new
return nil
@@ -138,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if req, ok := transportScopes[identity]; ok {
- logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ logrus.Debugf(` Using transport %q policy section %q`, transportName, identity)
return req
}
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if req, ok := transportScopes[name]; ok {
- logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
return req
}
}
// Look for a default match for the transport.
if req, ok := transportScopes[""]; ok {
- logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ logrus.Debugf(` Using transport %q policy section ""`, transportName)
return req
}
}
@@ -170,11 +172,11 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
-// - Do not use a an existence of an accepted signature to determine whether to run
-// a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-// signature are authorized to run code as root, or to affect system or cluster configuration.
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err
}
@@ -185,11 +187,12 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
- // FIXME: rename Signatures to UnverifiedSignatures
- // FIXME: pass context.Context
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
unverifiedSignatures, err := image.Signatures(ctx)
if err != nil {
return nil, err
@@ -255,7 +258,7 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
-func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) {
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return false, err
}
@@ -266,6 +269,8 @@ func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
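The public entry points keep their types.UnparsedImage signatures; only the internal plumbing wraps the argument via unparsedimage.FromPublic. A minimal sketch of driving that public API, assuming the caller already holds a *signature.Policy and a types.UnparsedImage:

```go
package example

import (
	"context"

	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// allowed evaluates img against policy; a minimal sketch of the public API
// whose internals the hunk above reworks.
func allowed(ctx context.Context, policy *signature.Policy, img types.UnparsedImage) (bool, error) {
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // pcReady → pcDestroyed; further calls would fail the state check

	// Internally this now wraps img via unparsedimage.FromPublic before
	// dispatching to the per-transport policy requirements.
	return pc.IsRunningImageAllowed(ctx, img)
}
```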
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
index 55cdd3054..a8bc01301 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
@@ -5,15 +5,15 @@ package signature
import (
"context"
- "github.com/containers/image/v5/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/sirupsen/logrus"
)
-func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarUnknown, nil, nil
}
-func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
// FIXME? Reject this at policy parsing time already?
logrus.Errorf("signedBaseLayer not implemented yet!")
return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index 26cca4759..e5c932918 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -4,44 +4,44 @@ package signature
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
- "strings"
+ "slices"
+ "github.com/containers/image/v5/internal/multierr"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
-func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
switch pr.KeyType {
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
- return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
- return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType))
}
- if pr.KeyPath != "" && pr.KeyData != nil {
- return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
- }
// FIXME: move this to per-context initialization
- var data []byte
- if pr.KeyData != nil {
- data = pr.KeyData
- } else {
- d, err := ioutil.ReadFile(pr.KeyPath)
- if err != nil {
- return sarRejected, nil, err
- }
- data = d
+ const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`
+ data, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: notOneSourceErrorText,
+ path: pr.KeyPath,
+ paths: pr.KeyPaths,
+ data: pr.KeyData,
+ })
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ if data == nil {
+ return sarRejected, nil, errors.New(notOneSourceErrorText)
}
// FIXME: move this to per-context initialization
- mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+ mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data)
if err != nil {
return sarRejected, nil, err
}
@@ -52,10 +52,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
- for _, trustedIdentity := range trustedIdentities {
- if keyIdentity == trustedIdentity {
- return nil
- }
+ if slices.Contains(trustedIdentities, keyIdentity) {
+ return nil
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.
@@ -63,7 +61,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
},
validateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
- return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
}
return nil
},
@@ -89,8 +87,9 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
return sarAccepted, signature, nil
}
-func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
- // FIXME: pass context.Context
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages
+ // (needs tests!)
sigs, err := image.Signatures(ctx)
if err != nil {
return false, err
@@ -108,7 +107,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.Unp
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
- reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
}
rejections = append(rejections, reason)
}
@@ -119,12 +118,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.Unp
case 1:
summary = rejections[0]
default:
- var msgs []string
- for _, e := range rejections {
- msgs = append(msgs, e.Error())
- }
- summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
- strings.Join(msgs, "; ")))
+ summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
}
return false, summary
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
new file mode 100644
index 000000000..9c553771c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
@@ -0,0 +1,346 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/internal/multierr"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// configBytesSources contains configuration fields which may result in one or more []byte values
+type configBytesSources struct {
+ inconsistencyErrorMessage string // Error to return if more than one source is set
+ path string // …Path: a path to a file containing the data, or ""
+ paths []string // …Paths: paths to files containing the data, or nil
+ data []byte // …Data: a single instance of the raw data, or nil
+ datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas
+}
+
+// loadBytesFromConfigSources ensures at most one of the sources in src is set,
+// and returns the referenced data, or nil if none of the sources is set.
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
+ sources := 0
+ var data [][]byte // = nil
+ if src.path != "" {
+ sources++
+ d, err := os.ReadFile(src.path)
+ if err != nil {
+ return nil, err
+ }
+ data = [][]byte{d}
+ }
+ if src.paths != nil {
+ sources++
+ data = [][]byte{}
+ for _, path := range src.paths {
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, d)
+ }
+ }
+ if src.data != nil {
+ sources++
+ data = [][]byte{src.data}
+ }
+ if src.datas != nil { // codespell:ignore datas
+ sources++
+ data = src.datas // codespell:ignore datas
+ }
+ if sources > 1 {
+ return nil, errors.New(src.inconsistencyErrorMessage)
+ }
+ return data, nil
+}
+
+// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
+ caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`,
+ path: f.CAPath,
+ data: f.CAData,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(caCertPEMs) != 1 {
+ return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`)
+ }
+ certs := x509.NewCertPool()
+ if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok {
+ return nil, errors.New("error loading Fulcio CA certificates")
+ }
+ fulcio := fulcioTrustRoot{
+ caCertificates: certs,
+ oidcIssuer: f.OIDCIssuer,
+ subjectEmail: f.SubjectEmail,
+ }
+ if err := fulcio.validate(); err != nil {
+ return nil, err
+ }
+ return &fulcio, nil
+}
+
+// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
+type sigstoreSignedTrustRoot struct {
+ publicKeys []crypto.PublicKey
+ fulcio *fulcioTrustRoot
+ rekorPublicKeys []*ecdsa.PublicKey
+}
+
+func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
+ res := sigstoreSignedTrustRoot{}
+
+ publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
+ path: pr.KeyPath,
+ paths: pr.KeyPaths,
+ data: pr.KeyData,
+ datas: pr.KeyDatas, // codespell:ignore datas
+ })
+ if err != nil {
+ return nil, err
+ }
+ if publicKeyPEMs != nil {
+ for index, keyData := range publicKeyPEMs {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData)
+ if err != nil {
+ return nil, fmt.Errorf("parsing public key %d: %w", index+1, err)
+ }
+ res.publicKeys = append(res.publicKeys, pk)
+ }
+ if len(res.publicKeys) == 0 {
+ return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`)
+ }
+ }
+
+ if pr.Fulcio != nil {
+ f, err := pr.Fulcio.prepareTrustRoot()
+ if err != nil {
+ return nil, err
+ }
+ res.fulcio = f
+ }
+
+ rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: more than one of "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData", "rekorPublicKeyDatas" specified`,
+ path: pr.RekorPublicKeyPath,
+ paths: pr.RekorPublicKeyPaths,
+ data: pr.RekorPublicKeyData,
+ datas: pr.RekorPublicKeyDatas, // codespell:ignore datas
+ })
+ if err != nil {
+ return nil, err
+ }
+ if rekorPublicKeyPEMs != nil {
+ for index, pem := range rekorPublicKeyPEMs {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
+ if err != nil {
+ return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err)
+ }
+ pkECDSA, ok := pk.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1)
+
+ }
+ res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA)
+ }
+ if len(res.rekorPublicKeys) == 0 {
+ return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`)
+ }
+ }
+
+ return &res, nil
+}
+
+func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // We don’t know of a single user of this API, and we might return unexpected values in Signature.
+ // For now, just punt.
+ return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore")
+}
+
+func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
+ // FIXME: move this to per-context initialization
+ trustRoot, err := pr.prepareTrustRoot()
+ if err != nil {
+ return sarRejected, err
+ }
+
+ untrustedAnnotations := sig.UntrustedAnnotations()
+ untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey]
+ if !ok {
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
+ }
+ untrustedPayload := sig.UntrustedPayload()
+
+ var publicKeys []crypto.PublicKey
+ switch {
+ case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
+ case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
+
+ case trustRoot.publicKeys != nil:
+ if trustRoot.rekorPublicKeys != nil {
+ untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+ }
+
+ var rekorFailures []string
+ for _, candidatePublicKey := range trustRoot.publicKeys {
+ // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
+ // FIXME: We could just generate DER instead of the full PEM text
+ recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey)
+ if err != nil {
+ // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
+ // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
+ return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+ }
+ // We don’t care about the Rekor timestamp, just about log presence.
+ _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload)
+ if err == nil {
+ publicKeys = append(publicKeys, candidatePublicKey)
+ break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate
+ }
+ rekorFailures = append(rekorFailures, err.Error())
+ }
+ if len(publicKeys) == 0 {
+ if len(rekorFailures) == 0 {
+ // Coverage: We have ensured that len(trustRoot.publicKeys) != 0; when nothing succeeds, there must be at least one failure.
+ return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`)
+ }
+ return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", ")))
+ }
+ } else {
+ publicKeys = trustRoot.publicKeys
+ }
+
+ case trustRoot.fulcio != nil:
+ if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
+ }
+ untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+ }
+ untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey)
+ }
+ var untrustedIntermediateChainBytes []byte
+ if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
+ untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
+ }
+ pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio,
+ []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
+ if err != nil {
+ return sarRejected, err
+ }
+ publicKeys = []crypto.PublicKey{pk}
+ }
+
+ if len(publicKeys) == 0 {
+ // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set,
+ // and we have already excluded the possibility in the switch above.
+ return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
+ }
+ signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
+ }
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, err
+ }
+ if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values
+ return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen.
+ }
+
+ return sarAccepted, nil
+}
+
+func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ sigs, err := image.UntrustedSignatures(ctx)
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ foundNonSigstoreSignatures := 0
+ foundSigstoreNonAttachments := 0
+ for _, s := range sigs {
+ sigstoreSig, ok := s.(signature.Sigstore)
+ if !ok {
+ foundNonSigstoreSignatures++
+ continue
+ }
+ if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType {
+ foundSigstoreNonAttachments++
+ continue
+ }
+
+ var reason error
+ switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 {
+ // A nice message for the most common case.
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ } else {
+ summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)",
+ foundNonSigstoreSignatures, foundSigstoreNonAttachments))
+ }
+ case 1:
+ summary = rejections[0]
+ default:
+ summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
+ }
+ return false, summary
+}
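loadBytesFromConfigSources in this new file accepts four mutually exclusive sources and returns nil when none is set, which is why both callers follow up with their own "no source" or "produced no public keys" checks. A standalone sketch of that at-most-one rule (the function and field names here are illustrative, not the library's):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// atMostOneSource mirrors the rule enforced by loadBytesFromConfigSources:
// count the sources that are set, fail on more than one, return nil on none.
func atMostOneSource(path string, paths []string, data []byte) ([][]byte, error) {
	sources := 0
	var out [][]byte // stays nil if no source is set
	if path != "" {
		sources++
		d, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}
		out = [][]byte{d}
	}
	if paths != nil {
		sources++
		out = [][]byte{} // non-nil even for an empty list, hence the later len()==0 checks
		for _, p := range paths {
			d, err := os.ReadFile(p)
			if err != nil {
				return nil, err
			}
			out = append(out, d)
		}
	}
	if data != nil {
		sources++
		out = [][]byte{data}
	}
	if sources > 1 {
		return nil, errors.New("more than one source specified")
	}
	return out, nil
}

func main() {
	// An empty-but-non-nil paths slice still counts as a configured source.
	_, err := atMostOneSource("", []string{}, []byte("key material"))
	fmt.Println(err) // more than one source specified
}
```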
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
index f949088b5..031866f0d 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
@@ -6,24 +6,24 @@ import (
"context"
"fmt"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
-func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
// prInsecureAcceptAnything semantics: Every image is allowed to run,
// but this does not consider the signature as verified.
return sarUnknown, nil, nil
}
-func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return true, nil
}
-func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}
-func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go
new file mode 100644
index 000000000..290fc2459
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go
@@ -0,0 +1,8 @@
+//go:build !freebsd
+// +build !freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go
new file mode 100644
index 000000000..702b7171f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go
@@ -0,0 +1,8 @@
+//go:build freebsd
+// +build freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json"
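Both per-OS variants feed DefaultPolicy(). A short, hedged example of reaching the compiled-in path through the public API (a nil SystemContext falls back to the built-in default):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// With a nil *types.SystemContext this resolves to builtinDefaultPolicyPath:
	// /etc/containers/policy.json on most platforms, the /usr/local prefix on FreeBSD.
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		fmt.Println("no usable policy:", err)
		return
	}
	fmt.Printf("loaded policy with %d default requirement(s)\n", len(policy.Default))
}
```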
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
index 064866cf6..390957b02 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -7,12 +7,12 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
r1 := image.Reference().DockerReference()
if r1 == nil {
return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
@@ -25,7 +25,7 @@ func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (referen
return r1, r2, nil
}
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -56,7 +56,7 @@ func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named)
return false
}
}
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -64,7 +64,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse
return matchRepoDigestOrExactReferenceValues(intended, signature)
}
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -85,7 +85,7 @@ func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, err
return r1, r2, nil
}
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
if err != nil {
return false
@@ -97,7 +97,7 @@ func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage,
return signature.String() == intended.String()
}
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
if err != nil {
return false
@@ -136,12 +136,12 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
- return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
+ return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err)
}
return newParsedRef, nil
}
-func (prm *prmRemapIdentity) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
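The remapIdentity match above rewrites the reference prefix with a single strings.Replace and re-parses the result. A standalone sketch of that rewrite step (prefixes and image names below are placeholders):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
)

// remap sketches the remapReferencePrefix step shown above: replace the first
// occurrence of the prefix, then re-parse to ensure the result is still a
// valid named reference.
func remap(refString, prefix, signedPrefix string) (reference.Named, error) {
	rewritten := strings.Replace(refString, prefix, signedPrefix, 1)
	named, err := reference.ParseNamed(rewritten)
	if err != nil {
		return nil, fmt.Errorf("error rewriting reference from %q to %q: %w", refString, rewritten, err)
	}
	return named, nil
}

func main() {
	n, err := remap("mirror.local/ns/app:v1", "mirror.local", "registry.example.com")
	fmt.Println(n, err) // registry.example.com/ns/app:v1 <nil>
}
```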
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index c6819929b..32aa1c0ad 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -46,6 +46,7 @@ const (
prTypeReject prTypeIdentifier = "reject"
prTypeSignedBy prTypeIdentifier = "signedBy"
prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+ prTypeSigstoreSigned prTypeIdentifier = "sigstoreSigned"
)
// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
@@ -66,18 +67,20 @@ type prReject struct {
type prSignedBy struct {
prCommon
- // KeyType specifies what kind of key reference KeyPath/KeyData is.
+ // KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
KeyType sbKeyType `json:"keyType"`
- // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
- // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
- // Defaults to "match-exact" if not specified.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}
@@ -104,6 +107,66 @@ type prSignedBaseLayer struct {
BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+ prCommon
+
+ // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+ // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyDatas [][]byte `json:"keyDatas,omitempty"`
+
+ // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ // If Fulcio is specified, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well.
+ Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+ // RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+ // RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
+ // RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+ // RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
+ // Note that /usr/bin/cosign interoperability might require using repo-only matching.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+ // prepareTrustRoot creates a fulcioTrustRoot from the input data.
+ // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+ prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+ // CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+ CAPath string `json:"caPath,omitempty"`
+ // CAData contains accepted CA root certificates in PEM format, base64-encoded as a whole. Exactly one of CAPath and CAData must be specified.
+ CAData []byte `json:"caData,omitempty"`
+ // OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+ OIDCIssuer string `json:"oidcIssuer,omitempty"`
+ // SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+ SubjectEmail string `json:"subjectEmail,omitempty"`
+}
+
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
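These struct tags define the policy.json syntax for the new requirement. A hedged example of a fragment that exercises it, round-tripped through the public parser (the paths are placeholders; parsing does not read the key files):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// The JSON keys match the struct tags defined above.
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com": [
	                {
	                    "type": "sigstoreSigned",
	                    "keyPath": "/etc/containers/example.pub",
	                    "rekorPublicKeyPath": "/etc/containers/rekor.pub",
	                    "signedIdentity": {"type": "matchRepoDigestOrExact"}
	                }
	            ]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		fmt.Println("invalid policy:", err)
		return
	}
	fmt.Printf("parsed policy for %d transport(s)\n", len(policy.Transports))
}
```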
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
deleted file mode 100644
index 09f4f85e0..000000000
--- a/vendor/github.com/containers/image/v5/signature/signature.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
-
-package signature
-
-import (
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/containers/image/v5/version"
- digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-const (
- signatureType = "atomic container signature"
-)
-
-// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
- msg string
-}
-
-func (err InvalidSignatureError) Error() string {
- return err.msg
-}
-
-// Signature is a parsed content of a signature.
-// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
-type Signature struct {
- DockerManifestDigest digest.Digest
- DockerReference string // FIXME: more precise type?
-}
-
-// untrustedSignature is a parsed content of a signature.
-type untrustedSignature struct {
- UntrustedDockerManifestDigest digest.Digest
- UntrustedDockerReference string // FIXME: more precise type?
- UntrustedCreatorID *string
- // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
- // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
- // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
- // we would add another field, UntrustedTimestampNS int64.
- UntrustedTimestamp *int64
-}
-
-// UntrustedSignatureInformation is information available in an untrusted signature.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-type UntrustedSignatureInformation struct {
- UntrustedDockerManifestDigest digest.Digest
- UntrustedDockerReference string // FIXME: more precise type?
- UntrustedCreatorID *string
- UntrustedTimestamp *time.Time
- UntrustedShortKeyIdentifier string
-}
-
-// newUntrustedSignature returns an untrustedSignature object with
-// the specified primary contents and appropriate metadata.
-func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
- // Use intermediate variables for these values so that we can take their addresses.
- // Golang guarantees that they will have a new address on every execution.
- creatorID := "atomic " + version.Version
- timestamp := time.Now().Unix()
- return untrustedSignature{
- UntrustedDockerManifestDigest: dockerManifestDigest,
- UntrustedDockerReference: dockerReference,
- UntrustedCreatorID: &creatorID,
- UntrustedTimestamp: &timestamp,
- }
-}
-
-// Compile-time check that untrustedSignature implements json.Marshaler
-var _ json.Marshaler = (*untrustedSignature)(nil)
-
-// MarshalJSON implements the json.Marshaler interface.
-func (s untrustedSignature) MarshalJSON() ([]byte, error) {
- if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
- return nil, errors.New("Unexpected empty signature content")
- }
- critical := map[string]interface{}{
- "type": signatureType,
- "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
- "identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
- }
- optional := map[string]interface{}{}
- if s.UntrustedCreatorID != nil {
- optional["creator"] = *s.UntrustedCreatorID
- }
- if s.UntrustedTimestamp != nil {
- optional["timestamp"] = *s.UntrustedTimestamp
- }
- signature := map[string]interface{}{
- "critical": critical,
- "optional": optional,
- }
- return json.Marshal(signature)
-}
-
-// Compile-time check that untrustedSignature implements json.Unmarshaler
-var _ json.Unmarshaler = (*untrustedSignature)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface
-func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
- err := s.strictUnmarshalJSON(data)
- if err != nil {
- if formatErr, ok := err.(jsonFormatError); ok {
- err = InvalidSignatureError{msg: formatErr.Error()}
- }
- }
- return err
-}
-
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
-func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
- var critical, optional json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
- "critical": &critical,
- "optional": &optional,
- }); err != nil {
- return err
- }
-
- var creatorID string
- var timestamp float64
- var gotCreatorID, gotTimestamp = false, false
- if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
- switch key {
- case "creator":
- gotCreatorID = true
- return &creatorID
- case "timestamp":
- gotTimestamp = true
- return &timestamp
- default:
- var ignore interface{}
- return &ignore
- }
- }); err != nil {
- return err
- }
- if gotCreatorID {
- s.UntrustedCreatorID = &creatorID
- }
- if gotTimestamp {
- intTimestamp := int64(timestamp)
- if float64(intTimestamp) != timestamp {
- return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
- }
- s.UntrustedTimestamp = &intTimestamp
- }
-
- var t string
- var image, identity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
- "type": &t,
- "image": &image,
- "identity": &identity,
- }); err != nil {
- return err
- }
- if t != signatureType {
- return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
- }
-
- var digestString string
- if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
- "docker-manifest-digest": &digestString,
- }); err != nil {
- return err
- }
- s.UntrustedDockerManifestDigest = digest.Digest(digestString)
-
- return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
- "docker-reference": &s.UntrustedDockerReference,
- })
-}
-
-// Sign formats the signature and returns a blob signed using mech and keyIdentity
-// (If it seems surprising that this is a method on untrustedSignature, note that there
-// isn’t a good reason to think that a key used by the user is trusted by any component
-// of the system just because it is a private key — actually the presence of a private key
- // on the system increases the likelihood of a successful attack on that private key
-// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
- json, err := json.Marshal(s)
- if err != nil {
- return nil, err
- }
-
- return mech.Sign(json, keyIdentity)
-}
-
-// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
-// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
-// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
-// because the functions have the same or similar types, so there is a risk of exchanging the functions;
-// named members of this struct are more explicit.
-type signatureAcceptanceRules struct {
- validateKeyIdentity func(string) error
- validateSignedDockerReference func(string) error
- validateSignedDockerManifestDigest func(digest.Digest) error
-}
-
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
-// match expected values, both as specified by rules, and returns it
-func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
- signed, keyIdentity, err := mech.Verify(unverifiedSignature)
- if err != nil {
- return nil, err
- }
- if err := rules.validateKeyIdentity(keyIdentity); err != nil {
- return nil, err
- }
-
- var unmatchedSignature untrustedSignature
- if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
- }
- if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
- return nil, err
- }
- if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
- return nil, err
- }
- // signatureAcceptanceRules have accepted this value.
- return &Signature{
- DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
- DockerReference: unmatchedSignature.UntrustedDockerReference,
- }, nil
-}
-
-// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
-// WITHOUT doing any cryptographic verification.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
- // NOTE: This should eventually do format autodetection.
- mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
- if err != nil {
- return nil, err
- }
- defer mech.Close()
-
- untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
- if err != nil {
- return nil, err
- }
- var untrustedDecodedContents untrustedSignature
- if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
- }
-
- var timestamp *time.Time // = nil
- if untrustedDecodedContents.UntrustedTimestamp != nil {
- ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
- timestamp = &ts
- }
- return &UntrustedSignatureInformation{
- UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
- UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
- UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
- UntrustedTimestamp: timestamp,
- UntrustedShortKeyIdentifier: shortKeyIdentifier,
- }, nil
-}
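The deleted MarshalJSON split the payload into a signed-over "critical" block and an advisory "optional" block. A sketch of the blob shape it produced, with placeholder values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The layout mirrors the deleted MarshalJSON above; all values are placeholders.
	sig := map[string]interface{}{
		"critical": map[string]interface{}{
			"type":     "atomic container signature",
			"image":    map[string]string{"docker-manifest-digest": "sha256:<manifest digest>"},
			"identity": map[string]string{"docker-reference": "registry.example.com/ns/app:v1"},
		},
		"optional": map[string]interface{}{
			"creator":   "atomic <version>",
			"timestamp": int64(1700000000),
		},
	}
	out, err := json.MarshalIndent(sig, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```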
diff --git a/vendor/github.com/containers/image/v5/signature/signer/signer.go b/vendor/github.com/containers/image/v5/signature/signer/signer.go
new file mode 100644
index 000000000..73ae550aa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/signer/signer.go
@@ -0,0 +1,9 @@
+package signer
+
+import "github.com/containers/image/v5/internal/signer"
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// It can only be created from within the containers/image package; it can’t be implemented externally.
+//
+// The owner of a Signer must call Close() when done.
+type Signer = signer.Signer
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
new file mode 100644
index 000000000..2e510f60e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,103 @@
+package sigstore
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/secure-systems-lab/go-securesystemslib/encrypted"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+ // from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
+ cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+ // from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
+ sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats?
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+ // Decrypt first
+ p, _ := pem.Decode(key)
+ if p == nil {
+ return nil, errors.New("invalid pem block")
+ }
+ if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
+ return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+ }
+
+ x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+ if err != nil {
+ return nil, fmt.Errorf("decrypt: %w", err)
+ }
+
+ pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+ if err != nil {
+ return nil, fmt.Errorf("parsing private key: %w", err)
+ }
+ switch pk := pk.(type) {
+ case *rsa.PrivateKey:
+ return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+ case *ecdsa.PrivateKey:
+ return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+ case ed25519.PrivateKey:
+ return signature.LoadED25519SignerVerifier(pk)
+ default:
+ return nil, errors.New("unsupported key type")
+ }
+}
+
+// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
+// loadPrivateKey always requires encryption, so this always requires a passphrase.
+func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) {
+ x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("x509 encoding private key: %w", err)
+ }
+
+ encBytes, err := encrypted.Encrypt(x509Encoded, password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // store in PEM format
+ privBytes := pem.EncodeToMemory(&pem.Block{
+ Bytes: encBytes,
+ // Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types,
+ // but a version of cosign that can accept them has not yet been released.
+ Type: cosignPrivateKeyPemType,
+ })
+
+ // Now do the public key
+ pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return privBytes, pubBytes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/generate.go b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
new file mode 100644
index 000000000..77520c123
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
@@ -0,0 +1,35 @@
+package sigstore
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+)
+
+// GenerateKeyPairResult is a struct to ensure the private and public parts cannot be confused by the caller.
+type GenerateKeyPairResult struct {
+ PublicKey []byte
+ PrivateKey []byte
+}
+
+// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format,
+// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase).
+// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API,
+// and can change with best practices over time.
+func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) {
+ // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes
+ // only requires ECDSA-P256 to be supported, so that’s what we must use.
+ rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ // Coverage: This can fail only if the randomness source fails
+ return nil, err
+ }
+ private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase)
+ if err != nil {
+ return nil, err
+ }
+ return &GenerateKeyPairResult{
+ PublicKey: public,
+ PrivateKey: private,
+ }, nil
+}
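A short usage sketch for the new GenerateKeyPair API (the passphrase and file names are placeholders); per marshalKeyPair above, the returned private key is already passphrase-encrypted PEM, so it can be written to disk as-is:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature/sigstore"
)

func main() {
	res, err := sigstore.GenerateKeyPair([]byte("correct horse battery staple"))
	if err != nil {
		fmt.Println("keygen failed:", err)
		return
	}
	// The private key is encrypted with the passphrase and PEM-armored.
	if err := os.WriteFile("cosign.key", res.PrivateKey, 0o600); err != nil {
		fmt.Println(err)
		return
	}
	if err := os.WriteFile("cosign.pub", res.PublicKey, 0o644); err != nil {
		fmt.Println(err)
	}
}
```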
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
new file mode 100644
index 000000000..c6258f408
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
@@ -0,0 +1,95 @@
+package internal
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+type Option func(*SigstoreSigner) error
+
+// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures.
+// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the
+// dependency impact.
+type SigstoreSigner struct {
+ PrivateKey sigstoreSignature.Signer // May be nil during initialization
+ SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey
+
+ // Fulcio results to include
+ FulcioGeneratedCertificate []byte // Or nil
+ FulcioGeneratedCertificateChain []byte // Or nil
+
+ // Rekor state
+ RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *SigstoreSigner) ProgressMessage() string {
+ return "Signing image using a sigstore signature"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
+ if s.PrivateKey == nil {
+ return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner")
+ }
+
+ if reference.IsNameOnly(dockerReference) {
+ return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ // sigstore/cosign completely ignores dockerReference for actual policy decisions.
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
+ // So, just do what simple signing does, and cosign won’t mind.
+ payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
+ payloadBytes, err := json.Marshal(payloadData)
+ if err != nil {
+ return nil, err
+ }
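+
+ // For illustration, the marshaled payload has this shape (all values are examples only):
+ //
+ //	{"critical":{"identity":{"docker-reference":"example.com/repo:tag"},
+ //	"image":{"docker-manifest-digest":"sha256:…"},
+ //	"type":"cosign container image signature"},
+ //	"optional":{…}}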
+
+ // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
+ // which seems not to be used by anything. So we don’t bother.
+ signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes))
+ if err != nil {
+ return nil, fmt.Errorf("creating signature: %w", err)
+ }
+ base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
+ var rekorSETBytes []byte // = nil
+ if s.RekorUploader != nil {
+ set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes)
+ if err != nil {
+ return nil, err
+ }
+ rekorSETBytes = set
+ }
+
+ annotations := map[string]string{
+ signature.SigstoreSignatureAnnotationKey: base64Signature,
+ }
+ if s.FulcioGeneratedCertificate != nil {
+ annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate)
+ }
+ if s.FulcioGeneratedCertificateChain != nil {
+ annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain)
+ }
+ if rekorSETBytes != nil {
+ annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes)
+ }
+ return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil
+}
+
+func (s *SigstoreSigner) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
new file mode 100644
index 000000000..fb825ada9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
@@ -0,0 +1,60 @@
+package sigstore
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/signature/sigstore/internal"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Option = internal.Option
+
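+// WithPrivateKeyFile returns an Option for NewSigner, to sign using a private key
+// stored in the encrypted PEM format produced by GenerateKeyPair, read from file and
+// unlocked with passphrase.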
+func WithPrivateKeyFile(file string, passphrase []byte) Option {
+ return func(s *internal.SigstoreSigner) error {
+ if s.PrivateKey != nil {
+ return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+ }
+
+ if passphrase == nil {
+ return errors.New("private key passphrase not provided")
+ }
+
+ privateKeyPEM, err := os.ReadFile(file)
+ if err != nil {
+ return fmt.Errorf("reading private key from %s: %w", file, err)
+ }
+ signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
+ if err != nil {
+ return fmt.Errorf("initializing private key: %w", err)
+ }
+ publicKey, err := signerVerifier.PublicKey()
+ if err != nil {
+ return fmt.Errorf("getting public key from private key: %w", err)
+ }
+ publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+ if err != nil {
+ return fmt.Errorf("converting public key to PEM: %w", err)
+ }
+ s.PrivateKey = signerVerifier
+ s.SigningKeyOrCert = publicKeyPEM
+ return nil
+ }
+}
+
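+// NewSigner returns a signer.Signer which creates sigstore signatures using the provided Options.
+// The caller must call Close() on the returned Signer.
+//
+// A minimal usage sketch (the file name is illustrative):
+//
+//	s, err := NewSigner(WithPrivateKeyFile("cosign.key", passphrase))
+//	if err != nil {
+//		return err
+//	}
+//	defer s.Close()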
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+ s := internal.SigstoreSigner{}
+ for _, o := range opts {
+ if err := o(&s); err != nil {
+ return nil, err
+ }
+ }
+ if s.PrivateKey == nil {
+ return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
+ }
+
+ return internalSigner.NewSigner(&s), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go
new file mode 100644
index 000000000..94a846593
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -0,0 +1,281 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError = internal.InvalidSignatureError
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+ DockerManifestDigest digest.Digest
+ DockerReference string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+ untrustedDockerManifestDigest digest.Digest
+ untrustedDockerReference string // FIXME: more precise type?
+ untrustedCreatorID *string
+ // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ untrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”).
+type UntrustedSignatureInformation struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+ UntrustedTimestamp *time.Time
+ UntrustedShortKeyIdentifier string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "atomic " + version.Version
+ timestamp := time.Now().Unix()
+ return untrustedSignature{
+ untrustedDockerManifestDigest: dockerManifestDigest,
+ untrustedDockerReference: dockerReference,
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ }
+}
+
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+ if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]any{
+ "type": signatureType,
+ "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+ }
+ optional := map[string]any{}
+ if s.untrustedCreatorID != nil {
+ optional["creator"] = *s.untrustedCreatorID
+ }
+ if s.untrustedTimestamp != nil {
+ optional["timestamp"] = *s.untrustedTimestamp
+ }
+ signature := map[string]any{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
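+
+// For illustration, the marshaled form looks like this (see also docs/atomic-signature.md; values are examples):
+//
+//	{
+//	  "critical": {
+//	    "type": "atomic container signature",
+//	    "image": {"docker-manifest-digest": "sha256:…"},
+//	    "identity": {"docker-reference": "docker.io/library/busybox:latest"}
+//	  },
+//	  "optional": {"creator": "atomic …", "timestamp": 1471035347}
+//	}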
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+ return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore any
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ if gotCreatorID {
+ s.untrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+ return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
+ }
+ s.untrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != signatureType {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ digestValue, err := digest.Parse(digestString)
+ if err != nil {
+ return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+ }
+ s.untrustedDockerManifestDigest = digestValue
+
+ return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+ "docker-reference": &s.untrustedDockerReference,
+ })
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+ json, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+ return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+ }
+
+ if passphrase != "" {
+ return nil, errors.New("signing mechanism does not support passphrases")
+ }
+
+ return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct {
+ validateKeyIdentity func(string) error
+ validateSignedDockerReference func(string) error
+ validateSignedDockerManifestDigest func(digest.Digest) error
+}
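+
+// For illustration, a caller pinning a specific key and digest might supply rules like these
+// (identifiers expectedFingerprint and expectedDigest are hypothetical):
+//
+//	rules := signatureAcceptanceRules{
+//		validateKeyIdentity: func(keyIdentity string) error {
+//			if keyIdentity != expectedFingerprint {
+//				return fmt.Errorf("unexpected key %q", keyIdentity)
+//			}
+//			return nil
+//		},
+//		validateSignedDockerReference: func(ref string) error { return nil }, // accept any, for brevity
+//		validateSignedDockerManifestDigest: func(d digest.Digest) error {
+//			if d != expectedDigest {
+//				return fmt.Errorf("unexpected digest %s", d)
+//			}
+//			return nil
+//		},
+//	}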
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+ signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+ if err != nil {
+ return nil, err
+ }
+ if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+ return nil, err
+ }
+
+ var unmatchedSignature untrustedSignature
+ if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+ return nil, internal.NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // signatureAcceptanceRules have accepted this value.
+ return &Signature{
+ DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
+ DockerReference: unmatchedSignature.untrustedDockerReference,
+ }, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”).
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+ // NOTE: This should eventually do format autodetection.
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ defer mech.Close()
+
+ untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+ if err != nil {
+ return nil, err
+ }
+ var untrustedDecodedContents untrustedSignature
+ if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+ return nil, internal.NewInvalidSignatureError(err.Error())
+ }
+
+ var timestamp *time.Time // = nil
+ if untrustedDecodedContents.untrustedTimestamp != nil {
+ ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
+ timestamp = &ts
+ }
+ return &UntrustedSignatureInformation{
+ UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
+ UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
+ UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
+ UntrustedTimestamp: timestamp,
+ UntrustedShortKeyIdentifier: shortKeyIdentifier,
+ }, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
new file mode 100644
index 000000000..983bbb10b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
@@ -0,0 +1,105 @@
+package simplesigning
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ internalSig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/signer"
+)
+
+// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures.
+type simpleSigner struct {
+ mech signature.SigningMechanism
+ keyFingerprint string
+ passphrase string // "" if not provided.
+}
+
+type Option func(*simpleSigner) error
+
+// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint.
+func WithKeyFingerprint(keyFingerprint string) Option {
+ return func(s *simpleSigner) error {
+ s.keyFingerprint = keyFingerprint
+ return nil
+ }
+}
+
+// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key.
+// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry.
+func WithPassphrase(passphrase string) Option {
+ return func(s *simpleSigner) error {
+ // The gpgme implementation can’t use a passphrase containing \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return errors.New("invalid passphrase: must not contain a line break")
+ }
+ s.passphrase = passphrase
+ return nil
+ }
+}
+
+// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg).
+//
+// The set of options must identify a key to sign with, probably using a WithKeyFingerprint.
+//
+// The caller must call Close() on the returned Signer.
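+//
+// A minimal usage sketch (the fingerprint value is hypothetical):
+//
+//	s, err := NewSigner(WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"), WithPassphrase(passphrase))
+//	if err != nil {
+//		return err
+//	}
+//	defer s.Close()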
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+ mech, err := signature.NewGPGSigningMechanism()
+ if err != nil {
+ return nil, fmt.Errorf("initializing GPG: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ mech.Close()
+ }
+ }()
+ if err := mech.SupportsSigning(); err != nil {
+ return nil, fmt.Errorf("Signing not supported: %w", err)
+ }
+
+ s := simpleSigner{
+ mech: mech,
+ }
+ for _, o := range opts {
+ if err := o(&s); err != nil {
+ return nil, err
+ }
+ }
+ if s.keyFingerprint == "" {
+ return nil, errors.New("no key identity provided for simple signing")
+ }
+ // Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
+
+ succeeded = true
+ return internalSigner.NewSigner(&s), nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *simpleSigner) ProgressMessage() string {
+ return "Signing image using simple signing"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
+ if reference.IsNameOnly(dockerReference) {
+ return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
+ Passphrase: s.passphrase,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return internalSig.SimpleSigningFromBlob(simpleSig), nil
+}
+
+func (s *simpleSigner) Close() error {
+ return s.mech.Close()
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
new file mode 100644
index 000000000..51fac71e4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -0,0 +1,1405 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "slices"
+ "sync"
+ "sync/atomic"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
+ // with a digest-based name that doesn't match its contents.
+ // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
+ // and there is no known user of this error.
+ ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+ // with an expected size that doesn't match the reader.
+ ErrBlobSizeMismatch = errors.New("blob size mismatch")
+)
+
+type storageImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.ImplementsPutBlobPartial
+ stubs.AlwaysSupportsSignatures
+
+ imageRef storageReference
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ manifestDigest digest.Digest // Valid if len(manifest) != 0
+ untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
+ signatures []byte // Signature contents, temporary
+ signatureses map[digest.Digest][]byte // Instance signature contents, temporary
+ metadata storageImageMetadata // Metadata contents being built
+
+ // Mapping from layer (by index) to the associated ID in the storage.
+ // It's protected *implicitly* since `commitLayer()`, at any given
+ // time, can only be executed by *one* goroutine. Please refer to
+ // `queueOrCommit()` for further details on how the single-caller
+ // guarantee is implemented.
+ indexToStorageID map[int]string
+
+ // A storage destination may be used concurrently, due to HasThreadSafePutBlob.
+ lock sync.Mutex // Protects lockProtected
+ lockProtected storageImageDestinationLockProtected
+}
+
+// storageImageDestinationLockProtected contains storageImageDestination data which might be
+// accessed concurrently, due to HasThreadSafePutBlob.
+// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit)
+// users must hold storageImageDestination.lock.
+type storageImageDestinationLockProtected struct {
+ currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
+ indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
+
+ // Externally, a layer is identified either by (compressed) digest, or by TOC digest
+ // (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren’t two
+ // different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest.
+ // We may or may not know the relationships between these three values.
+ //
+ // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
+ // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
+ // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.)
+ //
+ // Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available
+ // before commitLayer is called.
+ // The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted).
+ //
+ // WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle.
+ // The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently.
+ // But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs
+ // for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some
+ // other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer
+ // to mismatching contents.
+ // Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic,
+ // instead of interpreting these fields, especially blobDiffIDs, directly.
+ //
+ // Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
+ // to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
+ indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
+ indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
+
+ // Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
+ // should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
+ // They are looked up in the order they are mentioned above.
+ diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
+ indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
+ // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
+ filenames map[digest.Digest]string
+ // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
+ fileSizes map[digest.Digest]int64
+}
+
+// addedLayerInfo records data about a layer to use in this image.
+type addedLayerInfo struct {
+ digest digest.Digest // Mandatory, the digest of the layer.
+ emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := tmpdir.MkDirBigFileTemp(sys, "storage")
+ if err != nil {
+ return nil, fmt.Errorf("creating a temporary directory: %w", err)
+ }
+ dest := &storageImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ },
+ // We ultimately have to decompress layers to populate trees on disk
+ // and need to explicitly ask for it here, so that the layers' MIME
+ // types can be set accordingly.
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
+ HasThreadSafePutBlob: true,
+ }),
+
+ imageRef: imageRef,
+ directory: directory,
+ signatureses: make(map[digest.Digest][]byte),
+ metadata: storageImageMetadata{
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ },
+ indexToStorageID: make(map[int]string),
+ lockProtected: storageImageDestinationLockProtected{
+ indexToAddedLayerInfo: make(map[int]addedLayerInfo),
+
+ indexToDiffID: make(map[int]digest.Digest),
+ indexToTOCDigest: make(map[int]digest.Digest),
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+
+ diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
+ indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
+ filenames: make(map[digest.Digest]string),
+ fileSizes: make(map[digest.Digest]int64),
+ },
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+ // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
+ for _, al := range s.lockProtected.indexToAdditionalLayer {
+ al.Release()
+ }
+ for _, v := range s.lockProtected.diffOutputs {
+ _ = s.imageRef.transport.store.CleanupStagedLayer(v)
+ }
+ return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+ return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
+ if err != nil {
+ return info, err
+ }
+
+ if options.IsConfig || options.LayerIndex == nil {
+ return info, nil
+ }
+
+ return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+ digest: info.Digest,
+ emptyLayer: options.EmptyLayer,
+ })
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
+ if blobinfo.Digest != "" {
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+ }
+ }
+
+ // Set up to digest the blob if necessary, and count its size while saving it to a file.
+ filename := s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ defer file.Close()
+ counter := ioutils.NewWriteCounter(file)
+ stream = io.TeeReader(stream, counter)
+ digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+ decompressed, err := archive.DecompressStream(stream)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
+ }
+
+ diffID := digest.Canonical.Digester()
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using context.Context.
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+
+ // Determine blob properties, and fail if information that we were given about the blob
+ // is known to be incorrect.
+ blobDigest := digester.Digest()
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ } else if blobinfo.Size != counter.Count {
+ return private.UploadedBlob{}, ErrBlobSizeMismatch
+ }
+
+ // Record information about the blob.
+ s.lock.Lock()
+ s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
+ s.lockProtected.fileSizes[blobDigest] = counter.Count
+ s.lockProtected.filenames[blobDigest] = filename
+ s.lock.Unlock()
+ // This is safe because we have just computed diffID, and blobDigest was either computed
+ // by us, or validated by the caller (usually copy.digestingReader).
+ options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+ return private.UploadedBlob{
+ Digest: blobDigest,
+ Size: blobSize,
+ }, nil
+}
+
+type zstdFetcher struct {
+ chunkAccessor private.BlobChunkAccessor
+ ctx context.Context
+ blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
+ for _, v := range chunks {
+ i := private.ImageSourceChunk{
+ Offset: v.Offset,
+ Length: v.Length,
+ }
+ newChunks = append(newChunks, i)
+ }
+ rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+ if _, ok := err.(private.BadPartialRequestError); ok {
+ err = chunked.ErrBadRequest{}
+ }
+ return rc, errs, err
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
+ fetcher := zstdFetcher{
+ chunkAccessor: chunkAccessor,
+ ctx: ctx,
+ blobInfo: srcInfo,
+ }
+
+ defer func() {
+ var perr chunked.ErrFallbackToOrdinaryLayerDownload
+ if errors.As(retErr, &perr) {
+ retErr = private.NewErrFallbackToOrdinaryLayerDownload(retErr)
+ }
+ }()
+
+ differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("staging a partially-pulled layer: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ _ = s.imageRef.transport.store.CleanupStagedLayer(out)
+ }
+ }()
+
+ if out.TOCDigest == "" && out.UncompressedDigest == "" {
+ return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set")
+ }
+
+ blobDigest := srcInfo.Digest
+
+ s.lock.Lock()
+ if out.UncompressedDigest != "" {
+ s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
+ if out.TOCDigest != "" {
+ options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
+ }
+ // Don’t set indexToTOCDigest on this path:
+ // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
+ // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
+ // That TOC is quite unlikely to match any other TOC value.
+
+ // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
+ // responsible for ensuring blobDigest has been validated.
+ if out.CompressedDigest != blobDigest {
+ s.lock.Unlock()
+ return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
+ out.CompressedDigest, blobDigest)
+ }
+ // So, record also information about blobDigest, that might benefit reuse.
+ // We trust PrepareStagedLayer to validate or create both values correctly.
+ s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
+ options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
+ } else {
+ // Use diffID for layer identity if it is known.
+ if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
+ s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
+ }
+ s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+ }
+ s.lockProtected.diffOutputs[options.LayerIndex] = out
+ s.lock.Unlock()
+
+ succeeded = true
+ return private.UploadedBlob{
+ Digest: blobDigest,
+ Size: srcInfo.Size,
+ }, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
+ if err != nil || !reused || options.LayerIndex == nil {
+ return reused, info, err
+ }
+
+ return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+ digest: info.Digest,
+ emptyLayer: options.EmptyLayer,
+ })
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if blobDigest == "" {
+ return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
+ }
+ if err := blobDigest.Validate(); err != nil {
+ return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+ }
+ if options.TOCDigest != "" {
+ if err := options.TOCDigest.Validate(); err != nil {
+ return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+ }
+ }
+
+ // lock the entire method as it executes fairly quickly
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
+ // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
+ } else if err == nil {
+ alsTOCDigest := aLayer.TOCDigest()
+ if alsTOCDigest != options.TOCDigest {
+ // FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
+ // probably go on reading the layer from other sources.
+ //
+ // Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s
+ // not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way.
+ return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q",
+ options.TOCDigest, alsTOCDigest)
+ }
+ s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
+ s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer
+ return true, private.ReusedBlob{
+ Digest: blobDigest,
+ Size: aLayer.CompressedSize(),
+ }, nil
+ }
+ }
+
+ // Check if we have a wasn't-compressed layer in storage that's based on that blob.
+
+ // Check if we've already cached it in a file.
+ if size, ok := s.lockProtected.fileSizes[blobDigest]; ok {
+ // s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the
+ // filenames/fileSizes entry.
+ return true, private.ReusedBlob{
+ Digest: blobDigest,
+ Size: size,
+ }, nil
+ }
+
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err)
+ }
+ if len(layers) > 0 {
+ s.lockProtected.blobDiffIDs[blobDigest] = blobDigest
+ return true, private.ReusedBlob{
+ Digest: blobDigest,
+ Size: layers[0].UncompressedSize,
+ }, nil
+ }
+
+ // Check if we have a was-compressed layer in storage that's based on that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err)
+ }
+ if len(layers) > 0 {
+ // LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that
+ // always sets UncompressedDigest.
+ diffID := layers[0].UncompressedDigest
+ if diffID == "" {
+ return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String())
+ }
+ s.lockProtected.blobDiffIDs[blobDigest] = diffID
+ return true, private.ReusedBlob{
+ Digest: blobDigest,
+ Size: layers[0].CompressedSize,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
+ if options.CanSubstitute || size != -1 {
+ if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+ }
+ if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+ s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
+ return true, reused, nil
+ }
+ }
+ }
+
+ if options.TOCDigest != "" && options.LayerIndex != nil {
+ // Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
+ // Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
+ uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
+ if uncompressedDigest != "" {
+ layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+ }
+ if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+ s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
+ reused.MatchedByTOCDigest = true
+ return true, reused, nil
+ }
+ }
+ // Check if we have a chunked layer in storage with the same TOC digest.
+ layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
+ }
+ if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+ if uncompressedDigest != "" {
+ s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
+ }
+ s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
+ reused.MatchedByTOCDigest = true
+ return true, reused, nil
+ }
+ }
+
+ // Nope, we don't have it.
+ return false, private.ReusedBlob{}, nil
+}
+
+// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not.
+// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again.
+func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) {
+ if len(layers) > 0 {
+ if blobSize != -1 {
+ return true, private.ReusedBlob{
+ Digest: blobDigest,
+ Size: blobSize,
+ }
+ } else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
+ return true, private.ReusedBlob{
+ Digest: layers[0].UncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ CompressionOperation: types.Decompress,
+ CompressionAlgorithm: nil,
+ }
+ }
+ }
+ return false, private.ReusedBlob{}
+}
+
+// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
+type trustedLayerIdentityData struct {
+ layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
+
+ diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
+ tocDigest digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC
+ blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
+}
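+
+// For illustration: a partially-pulled chunked layer typically has layerIdentifiedByTOC == true
+// with tocDigest set (diffID and blobDigest possibly ""), while a fully-downloaded blob typically
+// has layerIdentifiedByTOC == false with both diffID and blobDigest set.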
+
+// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
+// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
+// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
+//
+// The caller must hold s.lock.
+func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) {
+ // The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set,
+ // even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available.
+ //
+ // If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find
+ // a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file:
+ // but the layer in question, and possibly child layers, might already have been committed to storage.
+ // A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set
+ // new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again.
+ //
+ // Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match
+ // and proceed to the originally-planned TOC match.
+
+ res := trustedLayerIdentityData{}
+ diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex]
+ if layerIdentifiedByDiffID {
+ res.layerIdentifiedByTOC = false
+ res.diffID = diffID
+ }
+ if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok {
+ res.tocDigest = tocDigest
+ if !layerIdentifiedByDiffID {
+ res.layerIdentifiedByTOC = true
+ }
+ }
+ if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok {
+ if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
+ // This is the only data we have, so it is clearly self-consistent.
+ res.layerIdentifiedByTOC = false
+ res.diffID = otherDiffID
+ res.blobDigest = blobDigest
+ layerIdentifiedByDiffID = true
+ } else {
+ // We have set up the layer identity without referring to blobDigest:
+ // an attacker might have used a manifest with non-matching tocDigest and blobDigest.
+ // But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest,
+ // we know blobDigest is fine as well.
+ if res.diffID != "" && otherDiffID == res.diffID {
+ res.blobDigest = blobDigest
+ }
+ }
+ }
+ if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
+ return trustedLayerIdentityData{}, false // We found nothing at all
+ }
+ return res, true
+}
+
+// computeID computes a recommended image ID based on information we have so far. If
+// the manifest is not of a type that we recognize, we return an empty value, indicating
+// that since we don't have a recommendation, a random ID should be used if one needs
+// to be allocated.
+func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) {
+ // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
+
+ layerInfos := m.LayerInfos()
+
+ // Build the diffID list. We need the decompressed sums that we've been calculating to
+ // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
+ // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
+ var diffIDs []digest.Digest
+ switch m.(type) {
+ case *manifest.Schema1:
+ // Build a list of the diffIDs we've generated for the non-throwaway FS layers
+ for i, li := range layerInfos {
+ if li.EmptyLayer {
+ continue
+ }
+ trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
+ if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
+ return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
+ }
+ if trusted.diffID == "" {
+ if trusted.layerIdentifiedByTOC {
+ logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID")
+ return "", nil
+ }
+ return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest)
+ }
+ diffIDs = append(diffIDs, trusted.diffID)
+ }
+ case *manifest.Schema2, *manifest.OCI1:
+ // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
+ // the diffID list.
+ default:
+ return "", nil
+ }
+
+ // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
+ //
+ // Traditionally that means the same ~config digest, as computed by m.ImageID;
+ // but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
+ // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see
+ // most of the data.
+ //
+ // So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
+ // must enter into the image ID computation.
+ // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
+ // and to introduce the changed behavior only when partial pulls are used.
+ //
+ // Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider
+ // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
+ ordinaryImageID, err := m.ImageID(diffIDs)
+ if err != nil {
+ return "", err
+ }
+ tocIDInput := ""
+ hasLayerPulledByTOC := false
+ for i, li := range layerInfos {
+ trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
+ if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
+ return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
+ }
+ layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
+ if trusted.layerIdentifiedByTOC {
+ hasLayerPulledByTOC = true
+ layerValue = trusted.tocDigest.String()
+ }
+ tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
+ }
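+
+ // For example (illustrative), with three layers of which only the second was pulled by TOC,
+ // tocIDInput is "|sha256:<TOC digest>||".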
+
+ if !hasLayerPulledByTOC {
+ return ordinaryImageID, nil
+ }
+ // ordinaryImageID is a digest of a config, which is a JSON value.
+ // To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
+ tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
+ logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
+ return tocImageID, nil
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.New(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.lockProtected.filenames[info.Digest]; ok {
+ contents, err2 := os.ReadFile(filename)
+ if err2 != nil {
+ return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
+// queueOrCommit queues the specified layer to be committed to the storage.
+// If no other goroutine is already committing layers, the layer and all
+// subsequent layers (if already queued) will be committed to the storage.
+func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
+ // NOTE: whenever the code below is touched, make sure that all code
+ // paths unlock the lock and to unlock it exactly once.
+ //
+ // Conceptually, the code is divided in two stages:
+ //
+ // 1) Queue in work by marking the layer as ready to be committed.
+ // If at least one previous/parent layer with a lower index has
+ // not yet been committed, return early.
+ //
+ // 2) Process the queued-in work by committing the "ready" layers
+ // in sequence. Make sure that more items can be queued-in
+ // during the comparatively I/O expensive task of committing a
+ // layer.
+ //
+ // The conceptual benefit of this design is that a caller can continue
+ // pulling layers after an early return. At any given time, only one
+ // caller is the "worker" routine committing layers. All other routines
+ // can continue pulling and queuing in layers.
+ s.lock.Lock()
+ s.lockProtected.indexToAddedLayerInfo[index] = info
+
+ // We're still waiting for at least one previous/parent layer to be
+ // committed, so there's nothing to do.
+ if index != s.lockProtected.currentIndex {
+ s.lock.Unlock()
+ return nil
+ }
+
+ for {
+ info, ok := s.lockProtected.indexToAddedLayerInfo[index]
+ if !ok {
+ break
+ }
+ s.lock.Unlock()
+ // Note: commitLayer locks on-demand.
+ if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
+ return err
+ }
+ s.lock.Lock()
+ index++
+ }
+
+ // Set the index at the very end to make sure that only one routine
+ // enters stage 2).
+ s.lockProtected.currentIndex = index
+ s.lock.Unlock()
+ return nil
+}
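+
+// Illustrative call pattern (assumed, not part of the library): if layers
+// arrive out of order as 1, 2, 0, the goroutines delivering layers 1 and 2
+// only record them and return early; the goroutine delivering layer 0 becomes
+// the worker, commits 0, then finds 1 and 2 already queued and commits them
+// too, leaving currentIndex == 3 for the next arrival.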
+
+// singleLayerIDComponent returns a single layer’s input to the computation of a layer (chain) ID,
+// and an indication of whether the input already has the shape of a layer ID.
+// It returns ("", false) if the layer is not found at all (which should never happen).
+func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
+ if !ok {
+ return "", false
+ }
+ if trusted.layerIdentifiedByTOC {
+ return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
+ }
+ return trusted.diffID.Encoded(), true // This looks like a chain ID, and it uses the traditional value.
+}
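+
+// For example (hypothetical values): a TOC-identified layer yields
+// ("@TOC=<toc hex>", false) and is therefore always hashed together with its
+// parent in commitLayer, while a DiffID-identified layer yields
+// ("<diffID hex>", true) and can serve directly as the ID of a base layer.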
+
+// commitLayer commits the specified layer with the given index to the storage.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+//
+// If the layer cannot be committed yet, the function returns (true, nil).
+//
+// Note that the previous layer is expected to already be committed.
+//
+// Caution: this function must be called without holding `s.lock`. Callers
+// must guarantee that, at any given time, at most one goroutine may execute
+// `commitLayer()`.
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
+ // Already committed? Return early.
+ if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
+ return false, nil
+ }
+
+ // Start with an empty string or the previous layer ID. Note that
+ // `s.indexToStorageID` can only be accessed by *one* goroutine at any
+ // given time. Hence, we don't need to lock accesses.
+ var parentLayer string
+ if index != 0 {
+ prev, ok := s.indexToStorageID[index-1]
+ if !ok {
+ return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
+ }
+ parentLayer = prev
+ }
+
+ // Carry over the previous ID for empty non-base layers.
+ if info.emptyLayer {
+ s.indexToStorageID[index] = parentLayer
+ return false, nil
+ }
+
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ // The layerID refers either to the DiffID or the digest of the TOC.
+ layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
+ if layerIDComponent == "" {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
+ //
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
+ //
+ // We are also ignoring lookups by TOC, and other non-trivial situations.
+ // Those can only happen using the c/image/internal/private API,
+ // so those internal callers should be fixed to follow the API instead of expanding this fallback.
+ logrus.Debugf("looking for diffID for blob=%+v", info.digest)
+
+ // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
+ has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
+ Cache: none.NoCache,
+ CanSubstitute: false,
+ })
+ if err != nil {
+ return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
+ }
+ if !has {
+ return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
+ }
+
+ layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
+ if layerIDComponent == "" {
+ return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
+ }
+ }
+
+ id := layerIDComponent
+ if !layerIDComponentStandalone || parentLayer != "" {
+ id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ s.indexToStorageID[index] = layer.ID
+ return false, nil
+ }
+
+ layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
+ if err != nil {
+ return false, err
+ }
+ if layer == nil {
+ return true, nil
+ }
+ s.indexToStorageID[index] = layer.ID
+ return false, nil
+}
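+
+// A minimal sketch of the ID derivation above (hypothetical hex values):
+//
+//	parent "" + standalone component "aaaa…"  =>  id = "aaaa…"
+//	parent "pppp…" + component "aaaa…"        =>  id = digest.Canonical.FromString("pppp…+aaaa…").Encoded()
+//
+// i.e. the traditional chain-ID scheme, except that TOC-identified layers
+// contribute "@TOC=<hex>" components and are never used standalone.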
+
+// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
+// If the layer cannot be committed yet, the function returns (nil, nil).
+func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
+ s.lock.Lock()
+ diffOutput, ok := s.lockProtected.diffOutputs[index]
+ s.lock.Unlock()
+ if ok {
+ // If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
+ // That way it will be persisted in storage even if the cache is deleted; also
+ // we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
+ // the costly commit delay until a manifest is available).
+ s.lock.Lock()
+ if d, ok := s.lockProtected.indexToDiffID[index]; ok {
+ diffOutput.UncompressedDigest = d
+ }
+ s.lock.Unlock()
+
+ var untrustedUncompressedDigest digest.Digest
+ if diffOutput.UncompressedDigest == "" {
+ d, err := s.untrustedLayerDiffID(index)
+ if err != nil {
+ return nil, err
+ }
+ if d == "" {
+ logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
+ return nil, nil
+ }
+
+ untrustedUncompressedDigest = d
+ // While the contents of the digest are untrusted, make sure at least the _format_ is valid,
+ // because we are going to write it to durable storage in expectedLayerDiffIDFlag.
+ if err := untrustedUncompressedDigest.Validate(); err != nil {
+ return nil, err
+ }
+ }
+
+ flags := make(map[string]interface{})
+ if untrustedUncompressedDigest != "" {
+ flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest.String()
+ logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
+ }
+
+ args := storage.ApplyStagedLayerOptions{
+ ID: newLayerID,
+ ParentLayer: parentLayer,
+
+ DiffOutput: diffOutput,
+ DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
+ Flags: flags,
+ },
+ }
+ layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
+ }
+ return layer, nil
+ }
+
+ s.lock.Lock()
+ al, ok := s.lockProtected.indexToAdditionalLayer[index]
+ s.lock.Unlock()
+ if ok {
+ layer, err := al.PutAs(newLayerID, parentLayer, nil)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
+ }
+ return layer, nil
+ }
+
+ // Check if we previously cached a file with that blob's contents. If we didn't,
+ // then we need to read the desired contents from a layer.
+ var filename string
+ var gotFilename bool
+ s.lock.Lock()
+ trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
+ if ok && trusted.blobDigest != "" {
+ filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
+ }
+ s.lock.Unlock()
+ if !ok { // We have already determined newLayerID, so the data must have been available.
+ return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
+ }
+ var trustedOriginalDigest digest.Digest // For storage.LayerOptions
+ if gotFilename {
+ // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
+ trustedOriginalDigest = trusted.blobDigest
+ } else {
+ // Try to find the layer with contents matching the data we use.
+ var layer *storage.Layer // = nil
+ if trusted.diffID != "" {
+ if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 {
+ layer = &layers[0]
+ }
+ }
+ if layer == nil && trusted.tocDigest != "" {
+ if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 {
+ layer = &layers[0]
+ }
+ }
+ if layer == nil && trusted.blobDigest != "" {
+ if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 {
+ layer = &layers[0]
+ }
+ }
+ if layer == nil {
+ return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
+ }
+
+ // Read the layer's contents.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err2 != nil {
+ return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
+ }
+ // Copy the layer diff to a file. Diff() takes a lock that it holds
+ // until the ReadCloser that it returns is closed, and PutLayer() wants
+ // the same lock, so the diff can't just be directly streamed from one
+ // to the other.
+ filename = s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
+ if err != nil {
+ diff.Close()
+ return nil, fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using
+ // ctx.Done().
+ fileSize, err := io.Copy(file, diff)
+ diff.Close()
+ file.Close()
+ if err != nil {
+ return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+
+ if trusted.diffID == "" && layer.UncompressedDigest != "" {
+ trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
+ }
+ // The stream we have is uncompressed, and it matches trusted.diffID (if known).
+ //
+ // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
+ // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
+ // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+ //
+ // We can legitimately set storage.LayerOptions.OriginalDigest to "",
+ // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
+ // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
+ trustedOriginalDigest = trusted.diffID
+
+ // Allow using the already-collected layer contents without extracting the layer again.
+ //
+ // This only matches against the uncompressed digest.
+ // We don’t have the original compressed data here to trivially set filenames[layerDigest].
+ // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+ // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
+ if trusted.diffID != "" {
+ s.lock.Lock()
+ s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID
+ s.lockProtected.filenames[trusted.diffID] = filename
+ s.lockProtected.fileSizes[trusted.diffID] = fileSize
+ s.lock.Unlock()
+ }
+ }
+ // Read the cached blob and use it as a diff.
+ file, err := os.Open(filename)
+ if err != nil {
+ return nil, fmt.Errorf("opening file %q: %w", filename, err)
+ }
+ defer file.Close()
+ // Build the new layer using the diff, regardless of where it came from.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
+ OriginalDigest: trustedOriginalDigest,
+ // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
+ UncompressedDigest: trusted.diffID,
+ }, file)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
+ }
+ return layer, nil
+}
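+
+// Summary of the reuse sources tried above, in order (a restatement, not new
+// behavior): a staged partial-pull diff (diffOutputs), an Additional Layer
+// Store entry (indexToAdditionalLayer), a locally cached blob file
+// (filenames), and finally re-reading the diff of an existing layer found by
+// DiffID, TOC digest, or compressed digest.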
+
+// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
+// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
+// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
+func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
+ // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
+ // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+ // Either way this function does not need the protection of s.lock.
+ if s.manifest == nil {
+ return "", nil
+ }
+
+ if s.untrustedDiffIDValues == nil {
+ mt := manifest.GuessMIMEType(s.manifest)
+ if mt != imgspecv1.MediaTypeImageManifest {
+ // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
+ // and then use types.Image.OCIConfig so that we can parse the image.
+ //
+ // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has an annotation),
+ // while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
+ // So it is not implemented yet.
+ return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
+ }
+ man, err := manifest.FromBlob(s.manifest, mt)
+ if err != nil {
+ return "", fmt.Errorf("parsing manifest: %w", err)
+ }
+
+ cb, err := s.getConfigBlob(man.ConfigInfo())
+ if err != nil {
+ return "", err
+ }
+
+ // retrieve the expected uncompressed digest from the config blob.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return "", err
+ }
+ s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
+ if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
+ s.untrustedDiffIDValues = []digest.Digest{}
+ }
+ }
+ if layerIndex >= len(s.untrustedDiffIDValues) {
+ return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex)
+ }
+ return s.untrustedDiffIDValues[layerIndex], nil
+}
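+
+// The values come from the image config’s rootfs section; an illustrative
+// (hypothetical) OCI config fragment:
+//
+//	{
+//	  "rootfs": {
+//	    "type": "layers",
+//	    "diff_ids": ["sha256:aaaa…", "sha256:bbbb…"]
+//	  }
+//	}
+//
+// layerIndex selects into diff_ids; the digests are copied as-is, not
+// validated against layer contents.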
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+ // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
+
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
+ }
+ toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving top-level manifest: %w", err)
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // options.UnparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+ // Find the list of layer blobs.
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ layerBlobs := man.LayerInfos()
+
+ // Extract, commit, or find the layers.
+ for i, blob := range layerBlobs {
+ if stopQueue, err := s.commitLayer(i, addedLayerInfo{
+ digest: blob.Digest,
+ emptyLayer: blob.EmptyLayer,
+ }, blob.Size); err != nil {
+ return err
+ } else if stopQueue {
+ return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): commitLayer() not ready to commit for layer %q", blob.Digest)
+ }
+ }
+ var lastLayer string
+ if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but they are technically possible, and may happen for non-image artifacts.
+ prev, ok := s.indexToStorageID[len(layerBlobs)-1]
+ if !ok {
+ return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+ }
+ lastLayer = prev
+ }
+
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ imgOptions := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ imgOptions.CreationDate = *inspect.Created
+ }
+
+ // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := set.New[digest.Digest]()
+ for blob := range s.lockProtected.filenames {
+ dataBlobs.Add(blob)
+ }
+ for _, layerBlob := range layerBlobs {
+ dataBlobs.Delete(layerBlob.Digest)
+ }
+ for _, blob := range dataBlobs.Values() {
+ v, err := os.ReadFile(s.lockProtected.filenames[blob])
+ if err != nil {
+ return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+ }
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: blob.String(),
+ Data: v,
+ Digest: digest.Canonical.FromBytes(v),
+ })
+ }
+ // Set up to save the options.UnparsedToplevel's manifest if it differs from
+ // the per-platform one, which is saved below.
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return fmt.Errorf("digesting top-level manifest: %w", err)
+ }
+ key, err := manifestBigDataKey(manifestDigest)
+ if err != nil {
+ return err
+ }
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: key,
+ Data: toplevelManifest,
+ Digest: manifestDigest,
+ })
+ }
+ // Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ key, err := manifestBigDataKey(s.manifestDigest)
+ if err != nil {
+ return err
+ }
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: key,
+ Data: s.manifest,
+ Digest: s.manifestDigest,
+ })
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: storage.ImageDigestBigDataKey,
+ Data: s.manifest,
+ Digest: s.manifestDigest,
+ })
+ // Set up to save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: "signatures",
+ Data: s.signatures,
+ Digest: digest.Canonical.FromBytes(s.signatures),
+ })
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ key, err := signatureBigDataKey(instanceDigest)
+ if err != nil {
+ return err
+ }
+ imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+ Key: key,
+ Data: signatures,
+ Digest: digest.Canonical.FromBytes(signatures),
+ })
+ }
+
+ // Set up to save our metadata.
+ metadata, err := json.Marshal(s.metadata)
+ if err != nil {
+ return fmt.Errorf("encoding metadata for image: %w", err)
+ }
+ if len(metadata) != 0 {
+ imgOptions.Metadata = string(metadata)
+ }
+
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID, err = s.computeID(man)
+ if err != nil {
+ return err
+ }
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", imgOptions)
+ if err != nil {
+ if !errors.Is(err, storage.ErrDuplicateID) {
+ logrus.Debugf("error creating image: %q", err)
+ return fmt.Errorf("creating image %q: %w", intendedID, err)
+ }
+ img, err = s.imageRef.transport.store.Image(intendedID)
+ if err != nil {
+ return fmt.Errorf("reading image %q: %w", intendedID, err)
+ }
+ if img.TopLayer != lastLayer {
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID)
+ }
+ logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
+ // set the data items and metadata on the already-present image
+ // FIXME: this _replaces_ any "signatures" blobs and their
+ // sizes (tracked in the metadata) which might have already
+ // been present with new values, when ideally we'd find a way
+ // to merge them since they all apply to the same image
+ for _, data := range imgOptions.BigData {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
+ logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
+ return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
+ }
+ }
+ if imgOptions.Metadata != "" {
+ if err := s.imageRef.transport.store.SetMetadata(img.ID, imgOptions.Metadata); err != nil {
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
+ }
+ logrus.Debugf("saved image metadata %q", imgOptions.Metadata)
+ }
+ } else {
+ logrus.Debugf("created new image ID %q with metadata %q", img.ID, imgOptions.Metadata)
+ }
+
+ // Clean up the unfinished image on any error.
+ // (Is this the right thing to do if the image has existed before?)
+ commitSucceeded := false
+ defer func() {
+ if !commitSucceeded {
+ logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
+ if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
+ logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
+ }
+ }
+ }()
+
+ // Add the reference's name on the image. We don't need to worry about avoiding duplicate
+ // values because AddNames() will deduplicate the list that we pass to it.
+ if name := s.imageRef.DockerReference(); name != nil {
+ if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
+ return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err)
+ }
+ logrus.Debugf("added name %q to image %q", name, img.ID)
+ }
+ if options.ReportResolvedReference != nil {
+ // FIXME? This is using nil for the named reference.
+ // It would be better to also use s.imageRef.named, because that allows us to resolve to the right
+ // digest / manifest (and corresponding signatures).
+ // The problem with that is that resolving such a reference fails if the s.imageRef.named name is moved to a different image
+ // (because it is a tag that moved, or because we have pulled “the same” image for a different architecture).
+ // Right now (2024-11), ReportResolvedReference is only used in c/common/libimage, where the caller only extracts the image ID,
+ // so the name does not matter; to give us options, copy.Options.ReportResolvedReference is explicitly refusing to document
+ // whether the value contains a name.
+ resolved, err := newReference(s.imageRef.transport, nil, intendedID)
+ if err != nil {
+ return fmt.Errorf("creating a resolved reference for (%s, %s): %w", s.imageRef.StringWithinTransport(), intendedID, err)
+ }
+ *options.ReportResolvedReference = resolved
+ }
+
+ commitSucceeded = true
+ return nil
+}
+
+// PutManifest writes the manifest to the destination.
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ s.manifest = bytes.Clone(manifestBlob)
+ s.manifestDigest = digest
+ return nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ sizes := []int{}
+ sigblob := []byte{}
+ for _, sigWithFormat := range signatures {
+ sig, err := signature.Blob(sigWithFormat)
+ if err != nil {
+ return err
+ }
+ sizes = append(sizes, len(sig))
+ sigblob = append(sigblob, sig...)
+ }
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.metadata.SignatureSizes = sizes
+ if len(s.manifest) > 0 {
+ manifestDigest := s.manifestDigest
+ instanceDigest = &manifestDigest
+ }
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.metadata.SignaturesSizes[*instanceDigest] = sizes
+ }
+ return nil
+}
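+
+// Illustrative layout (assumed sizes): three signatures of 100, 250, and 70
+// bytes are stored as a single 420-byte blob with sizes [100, 250, 70]
+// recorded in the metadata; readers recover the individual signatures by
+// slicing the blob at those offsets.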
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index f4747357c..7e368857e 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -1,1253 +1,51 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
- "bytes"
"context"
- "encoding/json"
- stderrors "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
- "github.com/containers/image/v5/internal/tmpdir"
- internalTypes "github.com/containers/image/v5/internal/types"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
var (
- // ErrBlobDigestMismatch is returned when PutBlob() is given a blob
- // with a digest-based name that doesn't match its contents.
- ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
- // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
- // with an expected size that doesn't match the reader.
- ErrBlobSizeMismatch = stderrors.New("blob size mismatch")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)
-type storageImageSource struct {
- imageRef storageReference
- image *storage.Image
- systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
- layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
- cachedManifest []byte // A cached copy of the manifest, if already known, or nil
- getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageDestination struct {
- imageRef storageReference
- directory string // Temporary directory where we store blobs until Commit() time
- nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
- manifest []byte // Manifest contents, temporary
- signatures []byte // Signature contents, temporary
- signatureses map[digest.Digest][]byte // Instance signature contents, temporary
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
-
- // A storage destination may be used concurrently. Accesses are
- // serialized via a mutex. Please refer to the individual comments
- // below for details.
- lock sync.Mutex
- // Mapping from layer (by index) to the associated ID in the storage.
- // It's protected *implicitly* since `commitLayer()`, at any given
- // time, can only be executed by *one* goroutine. Please refer to
- // `queueOrCommit()` for further details on how the single-caller
- // guarantee is implemented.
- indexToStorageID map[int]*string
- // All accesses to below data are protected by `lock` which is made
- // *explicit* in the code.
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
- filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
- currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
- indexToPulledBlob map[int]*types.BlobInfo // Mapping from layer (by index) to pulled down blob
- blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
-}
-
-type storageImageCloser struct {
- types.ImageCloser
- size int64
-}
-
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
-func manifestBigDataKey(digest digest.Digest) string {
- return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+func manifestBigDataKey(digest digest.Digest) (string, error) {
+ if err := digest.Validate(); err != nil { // Make sure info.Digest.String() uses the expected format and does not collide with other BigData keys.
+ return "", err
+ }
+ return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String(), nil
}
// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
-func signatureBigDataKey(digest digest.Digest) string {
- return "signature-" + digest.Encoded()
-}
-
-// newImageSource sets up an image for reading.
-func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
- // First, locate the image.
- img, err := imageRef.resolveImage(sys)
- if err != nil {
- return nil, err
- }
-
- // Build the reader object.
- image := &storageImageSource{
- imageRef: imageRef,
- systemContext: sys,
- image: img,
- layerPosition: make(map[digest.Digest]int),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- }
- if img.Metadata != "" {
- if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
- return nil, errors.Wrap(err, "error decoding metadata for source image")
- }
- }
- return image, nil
-}
-
-// Reference returns the image reference that we used to find this image.
-func (s *storageImageSource) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s *storageImageSource) Close() error {
- return nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *storageImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
- if info.Digest == image.GzippedEmptyLayerDigest {
- return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
- }
-
- // NOTE: the blob is first written to a temporary file and subsequently
- // closed. The intention is to keep the time we own the storage lock
- // as short as possible to allow other processes to access the storage.
- rc, n, _, err = s.getBlobAndLayerID(info)
- if err != nil {
- return nil, 0, err
- }
- defer rc.Close()
-
- tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
- if err != nil {
- return nil, 0, err
- }
-
- if _, err := io.Copy(tmpFile, rc); err != nil {
- return nil, 0, err
- }
-
- if _, err := tmpFile.Seek(0, 0); err != nil {
- return nil, 0, err
- }
-
- wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
- defer os.Remove(tmpFile.Name())
- return tmpFile.Close()
- })
-
- return wrapper, n, err
-}
-
-// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
- var layer storage.Layer
- var diffOptions *storage.DiffOptions
- // We need a valid digest value.
- err = info.Digest.Validate()
- if err != nil {
- return nil, -1, "", err
- }
- // Check if the blob corresponds to a diff that was used to initialize any layers. Our
- // callers should try to retrieve layers using their uncompressed digests, so no need to
- // check if they're using one of the compressed digests, which we can't reproduce anyway.
- layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-
- // If it's not a layer, then it must be a data item.
- if len(layers) == 0 {
- b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
- if err != nil {
- return nil, -1, "", err
- }
- r := bytes.NewReader(b)
- logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
- return ioutil.NopCloser(r), int64(r.Len()), "", nil
- }
- // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
- // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
- // just go ahead and use the first one every time.
- s.getBlobMutex.Lock()
- i := s.layerPosition[info.Digest]
- s.layerPosition[info.Digest] = i + 1
- s.getBlobMutex.Unlock()
- if len(layers) > 0 {
- layer = layers[i%len(layers)]
- }
- // Force the storage layer to not try to match any compression that was used when the layer was first
- // handed to it.
- noCompression := archive.Uncompressed
- diffOptions = &storage.DiffOptions{
- Compression: &noCompression,
- }
- if layer.UncompressedSize < 0 {
- n = -1
- } else {
- n = layer.UncompressedSize
- }
- logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
- rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
- if err != nil {
- return nil, -1, "", err
- }
- return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
- if instanceDigest != nil {
- key := manifestBigDataKey(*instanceDigest)
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error reading manifest for image instance %q", *instanceDigest)
- }
- return blob, manifest.GuessMIMEType(blob), err
- }
- if len(s.cachedManifest) == 0 {
- // The manifest is stored as a big data item.
- // Prefer the manifest corresponding to the user-specified digest, if available.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- key := manifestBigDataKey(digested.Digest())
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
- return nil, "", err
- }
- if err == nil {
- s.cachedManifest = blob
- }
- }
- }
- // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
- // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
- if len(s.cachedManifest) == 0 {
- cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
- if err != nil {
- return nil, "", err
- }
- s.cachedManifest = cachedBlob
- }
- }
- return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
-}
-
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
- }
- if manifest.MIMETypeIsMultiImage(manifestType) {
- return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
- }
- man, err := manifest.FromBlob(manifestBlob, manifestType)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
- }
-
- uncompressedLayerType := ""
- switch manifestType {
- case imgspecv1.MediaTypeImageManifest:
- uncompressedLayerType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
- uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
- }
-
- physicalBlobInfos := []types.BlobInfo{}
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID)
- }
- if layer.UncompressedDigest == "" {
- return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID)
- }
- if layer.UncompressedSize < 0 {
- return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID)
- }
- blobInfo := types.BlobInfo{
- Digest: layer.UncompressedDigest,
- Size: layer.UncompressedSize,
- MediaType: uncompressedLayerType,
- }
- physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
- layerID = layer.Parent
- }
-
- res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID)
- }
- return res, nil
-}
-
-// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
-// but using layer data which we can actually produce — physicalInfos for non-empty layers,
-// and image.GzippedEmptyLayer for empty ones.
-// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
- nextPhysical := 0
- res := make([]types.BlobInfo, len(manifestInfos))
- for i, mi := range manifestInfos {
- if mi.EmptyLayer {
- res[i] = types.BlobInfo{
- Digest: image.GzippedEmptyLayerDigest,
- Size: int64(len(image.GzippedEmptyLayer)),
- MediaType: mi.MediaType,
- }
- } else {
- if nextPhysical >= len(physicalInfos) {
- return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
- }
- res[i] = physicalInfos[nextPhysical]
- nextPhysical++
- }
- }
- if nextPhysical != len(physicalInfos) {
- return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
- }
- return res, nil
-}
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
- var offset int
- sigslice := [][]byte{}
- signature := []byte{}
- signatureSizes := s.SignatureSizes
- key := "signatures"
- instance := "default instance"
- if instanceDigest != nil {
- signatureSizes = s.SignaturesSizes[*instanceDigest]
- key = signatureBigDataKey(*instanceDigest)
- instance = instanceDigest.Encoded()
- }
- if len(signatureSizes) > 0 {
- signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s)", s.image.ID, instance)
- }
- signature = signatureBlob
- }
- for _, length := range signatureSizes {
- if offset+length > len(signature) {
- return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length)
- }
- sigslice = append(sigslice, signature[offset:offset+length])
- offset += length
- }
- if offset != len(signature) {
- return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatures)-offset)
- }
- return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image
-func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
- directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
- if err != nil {
- return nil, errors.Wrapf(err, "error creating a temporary directory")
- }
- image := &storageImageDestination{
- imageRef: imageRef,
- directory: directory,
- signatureses: make(map[digest.Digest][]byte),
- blobDiffIDs: make(map[digest.Digest]digest.Digest),
- blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
- fileSizes: make(map[digest.Digest]int64),
- filenames: make(map[digest.Digest]string),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- indexToStorageID: make(map[int]*string),
- indexToPulledBlob: make(map[int]*types.BlobInfo),
- }
- return image, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s *storageImageDestination) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up the temporary directory and additional layer store handlers.
-func (s *storageImageDestination) Close() error {
- for _, al := range s.blobAdditionalLayer {
- al.Release()
- }
- return os.RemoveAll(s.directory)
-}
-
-func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
- // We ultimately have to decompress layers to populate trees on disk
- // and need to explicitly ask for it here, so that the layers' MIME
- // types can be set accordingly.
- return types.PreserveOriginal
-}
-
-func (s *storageImageDestination) computeNextBlobCacheFile() string {
- return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
-}
-
-// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is
-// set, the blob will be committed directly. Either by the calling goroutine
-// or by another goroutine already committing layers.
-//
-// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
-// used together. Mixing the two with non "WithOptions" functions is not
-// supported.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) {
- info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig)
- if err != nil {
- return info, err
- }
-
- if options.IsConfig || options.LayerIndex == nil {
- return info, nil
- }
-
- return info, s.queueOrCommit(ctx, info, *options.LayerIndex)
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- // Stores a layer or data blob in our temporary directory, checking that any information
- // in the blobinfo matches the incoming data.
- errorBlobInfo := types.BlobInfo{
- Digest: "",
- Size: -1,
- }
- // Set up to digest the blob and count its size while saving it to a file.
- hasher := digest.Canonical.Digester()
- if blobinfo.Digest.Validate() == nil {
- if a := blobinfo.Digest.Algorithm(); a.Available() {
- hasher = a.Digester()
- }
- }
- diffID := digest.Canonical.Digester()
- filename := s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
- }
- defer file.Close()
- counter := ioutils.NewWriteCounter(hasher.Hash())
- reader := io.TeeReader(io.TeeReader(stream, counter), file)
- decompressed, err := archive.DecompressStream(reader)
- if err != nil {
- return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
- }
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- _, err = io.Copy(diffID.Hash(), decompressed)
- decompressed.Close()
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
- }
- // Ensure that any information that we were given about the blob is correct.
- if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
- return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch)
- }
- if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
- return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
- }
- // Record information about the blob.
- s.lock.Lock()
- s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
- s.fileSizes[hasher.Digest()] = counter.Count
- s.filenames[hasher.Digest()] = filename
- s.lock.Unlock()
- blobDigest := blobinfo.Digest
- if blobDigest.Validate() != nil {
- blobDigest = hasher.Digest()
- }
- blobSize := blobinfo.Size
- if blobSize < 0 {
- blobSize = counter.Count
- }
- // This is safe because we have just computed both values ourselves.
- cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
- return types.BlobInfo{
- Digest: blobDigest,
- Size: blobSize,
- MediaType: blobinfo.MediaType,
- }, nil
-}
-
-// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
-// options.LayerIndex is set, the reused blob will be recorded as already
-// pulled.
-//
-// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
-// used together. Mixing the two with the non "WithOptions" functions
-// is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
- reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef)
- if err != nil || !reused || options.LayerIndex == nil {
- return reused, info, err
+func signatureBigDataKey(digest digest.Digest) (string, error) {
+ if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return "", err
}
-
- return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex)
-}
-
-// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob.
-// If ref is provided, this function first tries to get layer from Additional Layer Store.
-func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if ref != nil {
- // Check if we have the layer in the underlying additional layer store.
- aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q and labels`, blobinfo.Digest)
- } else if err == nil {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
- s.blobAdditionalLayer[blobinfo.Digest] = aLayer
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: aLayer.CompressedSize(),
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
-
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
-
-// tryReusingBlobLocked implements a core functionality of TryReusingBlob.
-// This must be called with a lock being held on storageImageDestination.
-func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- if blobinfo.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
- }
- if err := blobinfo.Digest.Validate(); err != nil {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
- }
-
- // Check if we've already cached it in a file.
- if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: size,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have a wasn't-compressed layer in storage that's based on that blob.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Save this for completeness.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have a was-compressed layer in storage that's based on that blob.
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].CompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Does the blob correspond to a known DiffID which we already have available?
- // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
- // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
- if canSubstitute || blobinfo.Size != -1 {
- if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
- }
- if len(layers) > 0 {
- if blobinfo.Size != -1 {
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, blobinfo, nil
- }
- if !canSubstitute {
- return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo)
- }
- s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: uncompressedDigest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
- }
-
- // Nope, we don't have it.
- return false, types.BlobInfo{}, nil
-}
-
-// computeID computes a recommended image ID based on information we have so far. If
-// the manifest is not of a type that we recognize, we return an empty value, indicating
-// that since we don't have a recommendation, a random ID should be used if one needs
-// to be allocated.
-func (s *storageImageDestination) computeID(m manifest.Manifest) string {
- // Build the diffID list. We need the decompressed sums that we've been calculating to
- // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
- // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
- var diffIDs []digest.Digest
- switch m := m.(type) {
- case *manifest.Schema1:
- // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
- // in reverse of the order in which they were originally listed.
- for i, compat := range m.ExtractedV1Compatibility {
- if compat.ThrowAway {
- continue
- }
- blobSum := m.FSLayers[i].BlobSum
- diffID, ok := s.blobDiffIDs[blobSum]
- if !ok {
- logrus.Infof("error looking up diffID for layer %q", blobSum.String())
- return ""
- }
- diffIDs = append([]digest.Digest{diffID}, diffIDs...)
- }
- case *manifest.Schema2, *manifest.OCI1:
- // We know the ID calculation for these formats doesn't actually use the diffIDs,
- // so we don't need to populate the diffID list.
- default:
- return ""
- }
- id, err := m.ImageID(diffIDs)
- if err != nil {
- return ""
- }
- return id
-}
-
-// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
-// information out of it for Inspect().
-func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
- if info.Digest == "" {
- return nil, errors.Errorf(`no digest supplied when reading blob`)
- }
- if err := info.Digest.Validate(); err != nil {
- return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
- }
- // Assume it's a file, since we're only calling this from a place that expects to read files.
- if filename, ok := s.filenames[info.Digest]; ok {
- contents, err2 := ioutil.ReadFile(filename)
- if err2 != nil {
- return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
- }
- return contents, nil
- }
- // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
- return nil, errors.New("blob not found")
-}
-
-// queueOrCommit queues in the specified blob to be committed to the storage.
-// If no other goroutine is already committing layers, the layer and all
-// subsequent layers (if already queued) will be committed to the storage.
-func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int) error {
- // NOTE: whenever the code below is touched, make sure that all code
- // paths unlock the lock, and that they unlock it exactly once.
- //
- // Conceptually, the code is divided in two stages:
- //
- // 1) Queue in work by marking the layer as ready to be committed.
- // If at least one previous/parent layer with a lower index has
- // not yet been committed, return early.
- //
- // 2) Process the queued-in work by committing the "ready" layers
- // in sequence. Make sure that more items can be queued-in
- // during the comparatively I/O expensive task of committing a
- // layer.
- //
- // The conceptual benefit of this design is that a caller can continue
- // pulling layers after an early return. At any given time, only one
- // caller is the "worker" routine committing layers. All other routines
- // can continue pulling and queuing in layers.
- s.lock.Lock()
- s.indexToPulledBlob[index] = &blob
-
- // We're still waiting for at least one previous/parent layer to be
- // committed, so there's nothing to do.
- if index != s.currentIndex {
- s.lock.Unlock()
- return nil
- }
-
- for info := s.indexToPulledBlob[index]; info != nil; info = s.indexToPulledBlob[index] {
- s.lock.Unlock()
- layerInfo := manifest.LayerInfo{
- BlobInfo: *info,
- EmptyLayer: info.Digest == image.GzippedEmptyLayerDigest,
- }
- // Note: commitLayer locks on-demand.
- if err := s.commitLayer(ctx, layerInfo, index); err != nil {
- return err
- }
- s.lock.Lock()
- index++
- }
-
- // Set the index at the very end to make sure that only one routine
- // enters stage 2).
- s.currentIndex = index
- s.lock.Unlock()
- return nil
-}
-
-// commitLayer commits the specified blob with the given index to the storage.
-// Note that the previous layer is expected to already be committed.
-//
-// Caution: this function must be called without holding `s.lock`. Callers
-// must guarantee that, at any given time, at most one goroutine may execute
-// `commitLayer()`.
-func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
- // Already committed? Return early.
- if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
- return nil
- }
-
- // Start with an empty string or the previous layer ID. Note that
- // `s.indexToStorageID` can only be accessed by *one* goroutine at any
- // given time. Hence, we don't need to lock accesses.
- var lastLayer string
- if prev := s.indexToStorageID[index-1]; prev != nil {
- lastLayer = *prev
- }
-
- // Carry over the previous ID for empty non-base layers.
- if blob.EmptyLayer {
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if there's already a layer with the ID that we'd give to the result of applying
- // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
- s.lock.Lock()
- diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
- s.lock.Unlock()
- if !haveDiffID {
- // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
- // or to even check if we had it.
- // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
- // that relies on using a blob digest that has never been seen by the store had better call
- // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
- // so far we are going to accommodate that (if we should be doing that at all).
- logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- // NOTE: use `TryReusingBlob` to prevent recursion.
- has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
- if err != nil {
- return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
- }
- if !has {
- return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
- }
- diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
- if !haveDiffID {
- return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
- }
- }
- id := diffID.Hex()
- if lastLayer != "" {
- id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
- }
- if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
- // There's already a layer that should have the right contents, just reuse it.
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- s.lock.Lock()
- al, ok := s.blobAdditionalLayer[blob.Digest]
- s.lock.Unlock()
- if ok {
- layer, err := al.PutAs(id, lastLayer, nil)
- if err != nil {
- return errors.Wrapf(err, "failed to put layer from digest and labels")
- }
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if we previously cached a file with that blob's contents. If we didn't,
- // then we need to read the desired contents from a layer.
- s.lock.Lock()
- filename, ok := s.filenames[blob.Digest]
- s.lock.Unlock()
- if !ok {
- // Try to find the layer with contents matching that blobsum.
- layer := ""
- layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- } else {
- layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- }
- }
- if layer == "" {
- return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
- }
- // Read the layer's contents.
- noCompression := archive.Uncompressed
- diffOptions := &storage.DiffOptions{
- Compression: &noCompression,
- }
- diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
- if err2 != nil {
- return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
- }
- // Copy the layer diff to a file. Diff() takes a lock that it holds
- // until the ReadCloser that it returns is closed, and PutLayer() wants
- // the same lock, so the diff can't just be directly streamed from one
- // to the other.
- filename = s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- diff.Close()
- return errors.Wrapf(err, "error creating temporary file %q", filename)
- }
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using
- // ctx.Done().
- _, err = io.Copy(file, diff)
- diff.Close()
- file.Close()
- if err != nil {
- return errors.Wrapf(err, "error storing blob to file %q", filename)
- }
- // Make sure that we can find this file later, should we need the layer's
- // contents again.
- s.lock.Lock()
- s.filenames[blob.Digest] = filename
- s.lock.Unlock()
- }
- // Read the cached blob and use it as a diff.
- file, err := os.Open(filename)
- if err != nil {
- return errors.Wrapf(err, "error opening file %q", filename)
- }
- defer file.Close()
- // Build the new layer using the diff, regardless of where it came from.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
- if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
- return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
- }
-
- s.indexToStorageID[index] = &layer.ID
- return nil
+ return "signature-" + digest.Encoded(), nil
}
-func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if len(s.manifest) == 0 {
- return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
- }
- toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
- if err != nil {
- return errors.Wrapf(err, "error retrieving top-level manifest")
- }
- // If the name we're saving to includes a digest, then check that the
- // manifests that we're about to save all either match the one from the
- // unparsedToplevel, or match the digest in the name that we're using.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
- if err != nil {
- return err
- }
- if !matches {
- matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
- if err != nil {
- return err
- }
- }
- if !matches {
- return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
- }
- }
- }
- // Find the list of layer blobs.
- if len(s.manifest) == 0 {
- return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
- }
- man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
- if err != nil {
- return errors.Wrapf(err, "error parsing manifest")
- }
- layerBlobs := man.LayerInfos()
- // Extract, commit, or find the layers.
- for i, blob := range layerBlobs {
- if err := s.commitLayer(ctx, blob, i); err != nil {
- return err
- }
- }
- var lastLayer string
- if len(layerBlobs) > 0 { // Can happen when using caches
- prev := s.indexToStorageID[len(layerBlobs)-1]
- if prev == nil {
- return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been commited (lastLayer == nil)", len(layerBlobs)-1)
- }
- lastLayer = *prev
- }
-
- // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
- // was originally created, in case we're just copying it. If not, no harm done.
- options := &storage.ImageOptions{}
- if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
- logrus.Debugf("setting image creation date to %s", inspect.Created)
- options.CreationDate = *inspect.Created
- }
- // Create the image record, pointing to the most-recently added layer.
- intendedID := s.imageRef.id
- if intendedID == "" {
- intendedID = s.computeID(man)
- }
- oldNames := []string{}
- img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
- if err != nil {
- if errors.Cause(err) != storage.ErrDuplicateID {
- logrus.Debugf("error creating image: %q", err)
- return errors.Wrapf(err, "error creating image %q", intendedID)
- }
- img, err = s.imageRef.transport.store.Image(intendedID)
- if err != nil {
- return errors.Wrapf(err, "error reading image %q", intendedID)
- }
- if img.TopLayer != lastLayer {
- logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
- return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
- }
- logrus.Debugf("reusing image ID %q", img.ID)
- oldNames = append(oldNames, img.Names...)
- } else {
- logrus.Debugf("created new image ID %q", img.ID)
- }
- // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
- // we just need to screen out the ones that are actually layers to get the list of non-layers.
- dataBlobs := make(map[digest.Digest]struct{})
- for blob := range s.filenames {
- dataBlobs[blob] = struct{}{}
- }
- for _, layerBlob := range layerBlobs {
- delete(dataBlobs, layerBlob.Digest)
- }
- for blob := range dataBlobs {
- v, err := ioutil.ReadFile(s.filenames[blob])
- if err != nil {
- return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
- }
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
- return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
- }
- }
- // Set the reference's name on the image. We don't need to worry about avoiding duplicate
- // values because SetNames() will deduplicate the list that we pass to it.
- if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
- names := []string{}
- if name != nil {
- names = append(names, name.String())
- }
- if len(oldNames) > 0 {
- names = append(names, oldNames...)
- }
- if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
- return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID)
- }
- logrus.Debugf("set names of image %q to %v", img.ID, names)
- }
- // Save the unparsedToplevel's manifest.
- if len(toplevelManifest) != 0 {
- manifestDigest, err := manifest.Digest(toplevelManifest)
- if err != nil {
- return errors.Wrapf(err, "error digesting top-level manifest")
- }
- key := manifestBigDataKey(manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving top-level manifest for image %q", img.ID)
- }
- }
- // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
- // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
- // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
- manifestDigest, err := manifest.Digest(s.manifest)
- if err != nil {
- return errors.Wrapf(err, "error computing manifest digest")
- }
- key := manifestBigDataKey(manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
- }
- key = storage.ImageDigestBigDataKey
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
- }
- // Save the signatures, if we have any.
- if len(s.signatures) > 0 {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
- }
- }
- for instanceDigest, signatures := range s.signatureses {
- key := signatureBigDataKey(instanceDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
- }
- }
- // Save our metadata.
- metadata, err := json.Marshal(s)
- if err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error encoding metadata for image %q", img.ID)
- }
- if len(metadata) != 0 {
- if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "error saving metadata for image %q", img.ID)
- }
- logrus.Debugf("saved image metadata %q", string(metadata))
- }
- return nil
-}
-
-var manifestMIMETypes = []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- manifest.DockerV2Schema1SignedMediaType,
- manifest.DockerV2Schema1MediaType,
-}
-
-func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
- return manifestMIMETypes
-}
-
-// PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
- newBlob := make([]byte, len(manifestBlob))
- copy(newBlob, manifestBlob)
- s.manifest = newBlob
- return nil
-}
-
-// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
-// previously supplied to PutSignatures().
-func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-// uploaded to the image destination, true otherwise.
-func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (s *storageImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
- return true // Yes, we want the unmodified manifest
-}
-
-// PutSignatures records the image's signatures for committing as a single data blob.
-func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- sizes := []int{}
- sigblob := []byte{}
- for _, sig := range signatures {
- sizes = append(sizes, len(sig))
- newblob := make([]byte, len(sigblob)+len(sig))
- copy(newblob, sigblob)
- copy(newblob[len(sigblob):], sig)
- sigblob = newblob
- }
- if instanceDigest == nil {
- s.signatures = sigblob
- s.SignatureSizes = sizes
- }
- if instanceDigest == nil && len(s.manifest) > 0 {
- manifestDigest, err := manifest.Digest(s.manifest)
- if err != nil {
- return err
- }
- instanceDigest = &manifestDigest
- }
- if instanceDigest != nil {
- s.signatureses[*instanceDigest] = sigblob
- s.SignaturesSizes[*instanceDigest] = sizes
- }
- return nil
-}
-
-// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) getSize() (int64, error) {
- var sum int64
- // Size up the data blobs.
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
- if err != nil {
- return -1, errors.Wrapf(err, "error reading image %q", s.image.ID)
- }
- for _, dataName := range dataNames {
- bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
- if err != nil {
- return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID)
- }
- sum += bigSize
- }
- // Add the signature sizes.
- for _, sigSize := range s.SignatureSizes {
- sum += int64(sigSize)
- }
- // Walk the layer list.
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return -1, err
- }
- if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
- return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
- }
- sum += layer.UncompressedSize
- if layer.Parent == "" {
- break
- }
- layerID = layer.Parent
- }
- return sum, nil
+// storageImageMetadata is stored, as JSON, in storage.Image.Metadata
+type storageImageMetadata struct {
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
}
-// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) Size() (int64, error) {
- return s.getSize()
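+// storageImageCloser wraps types.ImageCloser and records the image's previously-computed size.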
+type storageImageCloser struct {
+ types.ImageCloser
+ size int64
}
// Size() returns the previously-computed size of the image, with no error.
@@ -1257,7 +55,7 @@ func (s *storageImageCloser) Size() (int64, error) {
// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, s)
+ src, err := newImageSource(sys, s)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 394557f39..acc4cb30e 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -1,18 +1,20 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"context"
+ "fmt"
+ "slices"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -31,11 +33,11 @@ func newReference(transport storageTransport, named reference.Named, id string)
return nil, ErrInvalidReference
}
if named != nil && reference.IsNameOnly(named) {
- return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
+ return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
}
if id != "" {
if err := validateImageID(id); err != nil {
- return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
}
}
// We take a copy of the transport, which contains a pointer to the
@@ -52,29 +54,29 @@ func newReference(transport storageTransport, named reference.Named, id string)
// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
- for _, name := range image.Names {
- if named, err := reference.ParseNormalizedNamed(name); err == nil {
- if named.Name() == repo {
- return true
- }
+ return slices.ContainsFunc(image.Names, func(name string) bool {
+ if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
+ return true
}
- }
- return false
+ return false
+ })
}
-// imageMatchesSystemContext checks if the passed-in image both contains a
-// manifest that matches the passed-in digest, and identifies itself as being
-// appropriate for running on the system that matches sys.
-// If we somehow ended up sharing the same storage among multiple types of
-// systems, and managed to download multiple images from the same manifest
-// list, their image records will all contain copies of the manifest list, and
-// this check will help us decide which of them we want to return when we've
-// been asked to resolve an image reference that uses the list's digest to a
-// specific image ID.
-func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
- // First, check if the image record has a manifest that matches the
- // specified digest.
- key := manifestBigDataKey(manifestDigest)
+// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
+// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.ResolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+ // Load the manifest that matches the specified digest.
+ // We don't need to care about storage.ImageDigestBigDataKey because
+ // manifest lists are only stored into storage by c/image versions
+ // that know about manifestBigDataKey, and only using that key.
+ key, err := manifestBigDataKey(manifestDigest)
+ if err != nil {
+ return false // This should never happen, manifestDigest comes from a reference.Digested, and that validates the format.
+ }
manifestBytes, err := store.ImageBigData(img.ID, key)
if err != nil {
return false
@@ -83,60 +85,31 @@ func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifest
// the digest of the instance that matches the current system, and try
// to load that manifest from the image record, and use it.
manifestType := manifest.GuessMIMEType(manifestBytes)
- if manifest.MIMETypeIsMultiImage(manifestType) {
- list, err := manifest.ListFromBlob(manifestBytes, manifestType)
- if err != nil {
- return false
- }
- manifestDigest, err = list.ChooseInstance(sys)
- if err != nil {
- return false
- }
- key = manifestBigDataKey(manifestDigest)
- manifestBytes, err = store.ImageBigData(img.ID, key)
- if err != nil {
- return false
- }
- manifestType = manifest.GuessMIMEType(manifestBytes)
+ if !manifest.MIMETypeIsMultiImage(manifestType) {
+ // manifestDigest directly specifies a per-platform image, so we aren't
+ // choosing among different variants.
+ return false
}
- // Load the image's configuration blob.
- m, err := manifest.FromBlob(manifestBytes, manifestType)
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
return false
}
- getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
- return store.ImageBigData(img.ID, blobInfo.Digest.String())
- }
- ii, err := m.Inspect(getConfig)
+ chosenInstance, err := list.ChooseInstance(sys)
if err != nil {
return false
}
- // Build a dummy index containing one instance and information about
- // the image's target system from the image's configuration.
- index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
- MediaType: imgspecv1.MediaTypeImageManifest,
- Digest: manifestDigest,
- Size: int64(len(manifestBytes)),
- Platform: &imgspecv1.Platform{
- OS: ii.Os,
- Architecture: ii.Architecture,
- },
- }}, nil)
- // Check that ChooseInstance() would select this image for this system,
- // from a list of images.
- instanceDigest, err := index.ChooseInstance(sys)
+ key, err = manifestBigDataKey(chosenInstance)
if err != nil {
return false
}
- // Double-check that we can read the runnable image's manifest from the
- // image record.
- key = manifestBigDataKey(instanceDigest)
_, err = store.ImageBigData(img.ID, key)
- return err == nil
+ return err == nil // true if img.ID is based on chosenInstance.
}
// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" && s.named != nil {
@@ -152,11 +125,24 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
// Look for an image with the specified digest that has the same name,
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
+ //
+ // Typically there should be at most one such image, because the same
+ // manifest digest implies the same config, and we choose the storage ID
+ // based on the config (deduplicating images), except:
+ // - the user can explicitly specify an ID when creating the image.
+ // In this case we don't have a preference among the alternatives.
+ // - when pulling an image from a multi-platform manifest list, we also
+ // store the manifest list in the image; this allows referencing a
+ // per-platform image using the manifest list digest, but that also
+ // means that we can have multiple genuinely different images in the
+ // storage matching the same manifest list digest (if pulled using different
+ // SystemContext.{OS,Architecture,Variant}Choice to the same storage).
+ // In this case we prefer the image matching the current SystemContext.
images, err := s.transport.store.ImagesByDigest(digested.Digest())
if err == nil && len(images) > 0 {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
- if loadedImage == nil || imageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
+ if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
loadedImage = image
s.id = image.ID
}
@@ -167,12 +153,12 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
}
if s.id == "" {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
- return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
if err != nil {
- return nil, errors.Wrapf(err, "error reading image %q", s.id)
+ return nil, fmt.Errorf("reading image %q: %w", s.id, err)
}
loadedImage = img
}
@@ -192,11 +178,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
- for _, digest := range loadedImage.Digests {
- if digest == digested.Digest() {
- loadedImage.Digest = digest
- break
- }
+ digest := digested.Digest()
+ if slices.Contains(loadedImage.Digests, digest) {
+ loadedImage.Digest = digest
}
}
}
@@ -229,10 +213,10 @@ func (s storageReference) StringWithinTransport() string {
}
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
- res = res + s.named.String()
+ res += s.named.String()
}
if s.id != "" {
- res = res + "@" + s.id
+ res += "@" + s.id
}
return res
}
@@ -240,10 +224,10 @@ func (s storageReference) StringWithinTransport() string {
func (s storageReference) PolicyConfigurationIdentity() string {
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
- res = res + s.named.String()
+ res += s.named.String()
}
if s.id != "" {
- res = res + "@" + s.id
+ res += "@" + s.id
}
return res
}
@@ -302,9 +286,37 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont
}
func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(ctx, sys, s)
+ return newImageSource(sys, s)
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(sys, s)
}
+
+// ResolveReference finds the underlying storage image for a storage.Transport reference.
+// It returns that image, and an updated reference which can be used to refer back to the _same_
+// image again.
+//
+// This matters if the input reference contains a tagged name; the destination of the tag can
+// move in local storage. The updated reference returned by this function contains the resolved
+// image ID, so later uses of that updated reference will either continue to refer to the same
+// image, or fail.
+//
+// Note that it _is_ possible for the later uses to fail, either because the image was removed
+// completely, or because the name used in the reference was untagged (even if the underlying image
+// ID still exists in local storage).
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
+ sref, ok := ref.(*storageReference)
+ if !ok {
+ return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
+ transports.ImageName(ref))
+ }
+ clone := *sref // A shallow copy we can update
+ img, err := clone.resolveImage(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ return clone, img, nil
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
new file mode 100644
index 000000000..55788f887
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -0,0 +1,480 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type storageImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
+ imageRef storageReference
+ image *storage.Image
+ systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
+ metadata storageImageMetadata
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
+ getBlobMutexProtected getBlobMutexProtected
+}
+
+// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
+type getBlobMutexProtected struct {
+ // digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the
+ // layer ID in the store.
+ digestToLayerID map[digest.Digest]string
+
+ // layerPosition stores where we are in reading a blob's layers
+ layerPosition map[digest.Digest]int
+}
+
+// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer.
+// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID
+// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest.
+const expectedLayerDiffIDFlag = "expected-layer-diffid"
+
+// newImageSource sets up an image for reading.
+func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
+ img, err := imageRef.resolveImage(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the reader object.
+ image := &storageImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
+
+ imageRef: imageRef,
+ systemContext: sys,
+ image: img,
+ metadata: storageImageMetadata{
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ },
+ getBlobMutexProtected: getBlobMutexProtected{
+ digestToLayerID: make(map[digest.Digest]string),
+ layerPosition: make(map[digest.Digest]int),
+ },
+ }
+ image.Compat = impl.AddCompat(image)
+ if img.Metadata != "" {
+ if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil {
+ return nil, fmt.Errorf("decoding metadata for source image: %w", err)
+ }
+ }
+ return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ // We need a valid digest value.
+ digest := info.Digest
+
+ if err := digest.Validate(); err != nil {
+ return nil, 0, err
+ }
+
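+ // Serve the well-known empty gzipped layer from the in-memory constant rather than consulting the store.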
+ if digest == image.GzippedEmptyLayerDigest {
+ return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+ }
+
+ var layers []storage.Layer
+
+ // This lookup path is strictly necessary for layers identified by TOC digest
+ // (where LayersByUncompressedDigest might not find our layer);
+ // for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call.
+ s.getBlobMutex.Lock()
+ layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
+ s.getBlobMutex.Unlock()
+
+ if found {
+ if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil {
+ layers = []storage.Layer{*layer}
+ }
+ } else {
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest)
+ }
+
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String())
+ if err != nil {
+ return nil, 0, err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", digest.String())
+ return io.NopCloser(r), int64(r.Len()), nil
+ }
+
+ // NOTE: the blob is first written to a temporary file and subsequently
+ // closed. The intention is to keep the time we own the storage lock
+ // as short as possible to allow other processes to access the storage.
+ rc, n, _, err := s.getBlobAndLayerID(digest, layers)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rc.Close()
+
+ tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
+ if err != nil {
+ return nil, 0, err
+ }
+ success := false
+ tmpFileRemovePending := true
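+ // success and tmpFileRemovePending coordinate the deferred cleanup: unless the file is handed off to the caller, it is closed and, if still present on disk, removed.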
+ defer func() {
+ if !success {
+ tmpFile.Close()
+ if tmpFileRemovePending {
+ os.Remove(tmpFile.Name())
+ }
+ }
+ }()
+ // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
+ // cleaned up on process termination (or if the caller forgets to invoke Close())
+ // On older versions of Windows we will have to fall back to relying on the caller to invoke Close()
+ if err := os.Remove(tmpFile.Name()); err == nil {
+ tmpFileRemovePending = false
+ }
+
+ if _, err := io.Copy(tmpFile, rc); err != nil {
+ return nil, 0, err
+ }
+ if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
+ return nil, 0, err
+ }
+
+ success = true
+
+ if tmpFileRemovePending {
+ return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+ tmpFile.Close()
+ return os.Remove(tmpFile.Name())
+ }), n, nil
+ }
+
+ return tmpFile, n, nil
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given, and returns the ID of the layer it was read from.
+func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+
+ // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+ // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+ // just go ahead and use the first one every time.
+ s.getBlobMutex.Lock()
+ i := s.getBlobMutexProtected.layerPosition[digest]
+ s.getBlobMutexProtected.layerPosition[digest] = i + 1
+ s.getBlobMutex.Unlock()
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
+ if instanceDigest != nil {
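+ // A specific manifest instance was requested (e.g. one entry of a manifest list); read exactly that copy.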
+ key, err := manifestBigDataKey(*instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
+ }
+ return blob, manifest.GuessMIMEType(blob), err
+ }
+ if len(s.cachedManifest) == 0 {
+ // The manifest is stored as a big data item.
+ // Prefer the manifest corresponding to the user-specified digest, if available.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ key, err := manifestBigDataKey(digested.Digest())
+ if err != nil {
+ return nil, "", err
+ }
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+ return nil, "", err
+ }
+ if err == nil {
+ s.cachedManifest = blob
+ }
+ }
+ }
+ // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err)
+ }
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)")
+ }
+ man, err := manifest.FromBlob(manifestBlob, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err)
+ }
+
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ }
+
+ physicalBlobInfos := []types.BlobInfo{} // Built reversed
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
+ }
+
+ blobDigest := layer.UncompressedDigest
+ if blobDigest == "" {
+ if layer.TOCDigest == "" {
+ return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
+ }
+ if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
+ return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
+ }
+ expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string)
+ if !ok {
+ return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
+ }
+ // If the layer is stored by its TOC, report the expected diffID as the layer Digest;
+ // the generic code is responsible for validating the digest.
+ // We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID.
+ blobDigest, err = digest.Parse(expectedDigest)
+ if err != nil {
+ return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
+ }
+ }
+ size := layer.UncompressedSize
+ if size < 0 {
+ size = -1
+ }
+ s.getBlobMutex.Lock()
+ s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
+ s.getBlobMutex.Unlock()
+ blobInfo := types.BlobInfo{
+ Digest: blobDigest,
+ Size: size,
+ MediaType: uncompressedLayerType,
+ }
+ physicalBlobInfos = append(physicalBlobInfos, blobInfo)
+ layerID = layer.Parent
+ }
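+ // The loop above walked from the top layer down to the base; reverse into the base-first order used by manifests.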
+ slices.Reverse(physicalBlobInfos)
+
+ res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+ if err != nil {
+ return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
+ }
+ return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+ nextPhysical := 0
+ res := make([]types.BlobInfo, len(manifestInfos))
+ for i, mi := range manifestInfos {
+ if mi.EmptyLayer {
+ res[i] = types.BlobInfo{
+ Digest: image.GzippedEmptyLayerDigest,
+ Size: int64(len(image.GzippedEmptyLayer)),
+ MediaType: mi.MediaType,
+ }
+ } else {
+ if nextPhysical >= len(physicalInfos) {
+ return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+ }
+ res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ nextPhysical++
+ }
+ }
+ if nextPhysical != len(physicalInfos) {
+ return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+ }
+ return res, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var offset int
+ signatureBlobs := []byte{}
+ signatureSizes := s.metadata.SignatureSizes
+ key := "signatures"
+ instance := "default instance"
+ if instanceDigest != nil {
+ signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
+ k, err := signatureBigDataKey(*instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ key = k
+ if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return nil, err
+ }
+ instance = instanceDigest.Encoded()
+ }
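+ // Signatures are stored as one concatenated blob; signatureSizes records each entry's length so the blob can be split below.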
+ if len(signatureSizes) > 0 {
+ data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+ }
+ signatureBlobs = data
+ }
+ res := []signature.Signature{}
+ for _, length := range signatureSizes {
+ if offset+length > len(signatureBlobs) {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signatureBlobs), offset+length)
+ }
+ sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+ }
+ res = append(res, sig)
+ offset += length
+ }
+ if offset != len(signatureBlobs) {
+ return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+ }
+ return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
+ if err != nil {
+ return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
+ }
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
+ if err != nil {
+ return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
+ }
+ sum += bigSize
+ }
+ // Add the signature sizes.
+ for _, sigSize := range s.metadata.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ // Walk the layer list.
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || (layer.TOCDigest == "" && layer.UncompressedSize < 0) {
+ return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ // FIXME: We allow layer.UncompressedSize < 0 above, because currently images in an Additional Layer Store don’t provide that value.
+ // Right now, various callers in Podman (and, also, newImage in this package) don’t expect the size computation to fail.
+ // Should we update the callers, or do we need to continue returning inaccurate information here? Or should we pay the cost
+ // to compute the size from the diff?
+ if layer.UncompressedSize >= 0 {
+ sum += layer.UncompressedSize
+ }
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
+ }
+ return sum, nil
+}
+
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+ return s.getSize()
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index c024bee9b..b981953ad 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -1,8 +1,10 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
+ "errors"
"fmt"
"path/filepath"
"strings"
@@ -13,7 +15,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -47,9 +48,26 @@ type StoreTransport interface {
GetStoreIfSet() storage.Store
// GetImage retrieves the image from the transport's store that's named
// by the reference.
+ // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+ // this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+ // can return different images, with no way for the caller to "freeze" the storage.Image identity
+ // without discarding the name entirely.
+ //
+ // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+ // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetImage(types.ImageReference) (*storage.Image, error)
// GetStoreImage retrieves the image from a specified store that's named
// by the reference.
+ //
+ // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+ // this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+ // can return different images, with no way for the caller to "freeze" the storage.Image identity
+ // without discarding the name entirely.
+ //
+ // Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+ //
+ // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+ // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
@@ -116,13 +134,13 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
if ref == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
+ return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
- return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
+ return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference)
}
ref = ref[closeIndex+1:]
}
@@ -134,7 +152,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
+ return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference)
}
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
@@ -146,7 +164,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// so we might as well use the expanded value.
id = img.ID
} else {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
+ return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
@@ -172,7 +190,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
var err error
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+ return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
}
named = reference.TagNameOnly(named)
}
@@ -195,7 +213,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
- options, err := storage.DefaultStoreOptionsAutoDetectUID()
+ options, err := storage.DefaultStoreOptions()
if err != nil {
return nil, err
}
@@ -224,7 +242,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
// the rest using knowledge that it has from elsewhere.
- if reference[0] == '[' {
+ if len(reference) > 0 && reference[0] == '[' {
closeIndex := strings.IndexRune(reference, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
@@ -233,35 +251,30 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
reference = reference[closeIndex+1:]
// Peel off a "driver@" from the start.
driverInfo := ""
- driverSplit := strings.SplitN(storeSpec, "@", 2)
- if len(driverSplit) != 2 {
+ driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
+ if !gotDriver {
+ storeSpec = driverPart1
if storeSpec == "" {
return nil, ErrInvalidReference
}
} else {
- driverInfo = driverSplit[0]
+ driverInfo = driverPart1
if driverInfo == "" {
return nil, ErrInvalidReference
}
- storeSpec = driverSplit[1]
+ storeSpec = driverPart2
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
- optionsSplit := strings.SplitN(storeSpec, ":", 2)
- if len(optionsSplit) == 2 {
- options = strings.Split(optionsSplit[1], ",")
- storeSpec = optionsSplit[0]
+ storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
+ if gotOptions {
+ options = strings.Split(optionsPart, ",")
}
// Peel off a "+runroot" from the new end.
- runRootInfo := ""
- runRootSplit := strings.SplitN(storeSpec, "+", 2)
- if len(runRootSplit) == 2 {
- runRootInfo = runRootSplit[1]
- storeSpec = runRootSplit[0]
- }
+ storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.
@@ -294,6 +307,15 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
return s.ParseStoreReference(store, reference)
}
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
@@ -303,13 +325,20 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
}
if sref, ok := ref.(*storageReference); ok {
tmpRef := *sref
- if img, err := tmpRef.resolveImage(&types.SystemContext{}); err == nil {
+ if img, err := tmpRef.resolveImage(nil); err == nil {
return img, nil
}
}
return nil, storage.ErrImageUnknown
}
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
store, err := s.GetStore()
if err != nil {
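
The `ParseReference` hunks above replace `strings.SplitN` with `strings.Cut` (Go 1.18+), which returns the text before and after the first separator plus a found flag. A hedged sketch of the same peeling order for a `driver@root+runroot:opt1,opt2` store specifier, with hypothetical names:

```go
package main

import (
	"fmt"
	"strings"
)

// parseStoreSpec peels apart "driver@root+runroot:opt1,opt2" the way the
// rewritten ParseReference does, using strings.Cut instead of SplitN.
func parseStoreSpec(spec string) (driver, root, runRoot string, options []string) {
	// Peel off a "driver@" from the start; Cut reports whether the separator was found.
	if before, after, ok := strings.Cut(spec, "@"); ok {
		driver, spec = before, after
	}
	// Peel off a ":options" from the end.
	if before, after, ok := strings.Cut(spec, ":"); ok {
		spec = before
		options = strings.Split(after, ",")
	}
	// Peel off a "+runroot"; runRoot stays "" if there is no "+".
	spec, runRoot, _ = strings.Cut(spec, "+")
	return driver, spec, runRoot, options
}

func main() {
	d, r, rr, opts := parseStoreSpec("overlay@/var/lib/containers/storage+/run/containers/storage:o1,o2")
	fmt.Println(d, r, rr, opts)
	// overlay /var/lib/containers/storage /run/containers/storage [o1 o2]
}
```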
diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go
index e9d321b8f..064c78b17 100644
--- a/vendor/github.com/containers/image/v5/tarball/doc.go
+++ b/vendor/github.com/containers/image/v5/tarball/doc.go
@@ -2,6 +2,7 @@
// tarballs and an optional template configuration.
//
// An example:
+//
// package main
//
// import (
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
index 23f67c49e..4e7ef3bcf 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
@@ -3,13 +3,13 @@ package tarball
import (
"context"
"fmt"
+ "maps"
"os"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
-
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -35,9 +35,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[
if r.annotations == nil {
r.annotations = make(map[string]string)
}
- for k, v := range annotations {
- r.annotations[k] = v
- }
+ maps.Copy(r.annotations, annotations)
return nil
}
@@ -67,22 +65,13 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := r.NewImageSource(ctx, sys)
- if err != nil {
- return nil, err
- }
- img, err := image.FromSource(ctx, sys, src)
- if err != nil {
- src.Close()
- return nil, err
- }
- return img, nil
+ return image.FromReference(ctx, sys, r)
}
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
for _, filename := range r.filenames {
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("error removing %q: %v", filename, err)
+ return fmt.Errorf("error removing %q: %w", filename, err)
}
}
return nil
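
`ConfigUpdate` now merges annotations with `maps.Copy` (Go 1.21) instead of a hand-written range loop, and `tarball_src.go` below clones them with `maps.Clone`. A small sketch of the semantics these helpers guarantee:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"a": "1"}
	src := map[string]string{"a": "2", "b": "3"}
	// maps.Copy overwrites existing keys in dst, exactly like the
	// range loop it replaces.
	maps.Copy(dst, src)
	fmt.Println(dst) // map[a:2 b:3]

	// maps.Clone, used for the manifest annotations, returns nil for a
	// nil input, so an unset annotations map stays unset.
	fmt.Println(maps.Clone(map[string]string(nil)) == nil) // true
}
```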
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 694ad17bd..7d4a83bc9 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -6,12 +6,14 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "maps"
"os"
"runtime"
"strings"
"time"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@@ -20,51 +22,64 @@ import (
)
type tarballImageSource struct {
- reference tarballReference
- filenames []string
- diffIDs []digest.Digest
- diffSizes []int64
- blobIDs []digest.Digest
- blobSizes []int64
- blobTypes []string
- config []byte
- configID digest.Digest
- configSize int64
- manifest []byte
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ reference tarballReference
+ blobs map[digest.Digest]tarballBlob
+ manifest []byte
+}
+
+// tarballBlob is a blob that tarballImageSource can return from GetBlob.
+type tarballBlob struct {
+ contents []byte // or nil to read from filename below
+ filename string // valid if contents == nil
+ size int64
}
func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- // Gather up the digests, sizes, and date information for all of the files.
- filenames := []string{}
+ // Pick up the layer comment from the configuration's history list, if one is set.
+ comment := "imported from tarball"
+ if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
+ comment = r.config.History[0].Comment
+ }
+
+ // Gather up the digests, sizes, and history information for all of the files.
+ blobs := map[digest.Digest]tarballBlob{}
diffIDs := []digest.Digest{}
- diffSizes := []int64{}
- blobIDs := []digest.Digest{}
- blobSizes := []int64{}
- blobTimes := []time.Time{}
- blobTypes := []string{}
+ created := time.Time{}
+ history := []imgspecv1.History{}
+ layerDescriptors := []imgspecv1.Descriptor{}
for _, filename := range r.filenames {
- var file *os.File
- var err error
- var blobSize int64
- var blobTime time.Time
var reader io.Reader
+ var blobTime time.Time
+ var blob tarballBlob
if filename == "-" {
- blobSize = int64(len(r.stdin))
- blobTime = time.Now()
reader = bytes.NewReader(r.stdin)
+ blobTime = time.Now()
+ blob = tarballBlob{
+ contents: r.stdin,
+ size: int64(len(r.stdin)),
+ }
} else {
- file, err = os.Open(filename)
+ file, err := os.Open(filename)
if err != nil {
- return nil, fmt.Errorf("error opening %q for reading: %v", filename, err)
+ return nil, err
}
defer file.Close()
reader = file
fileinfo, err := file.Stat()
if err != nil {
- return nil, fmt.Errorf("error reading size of %q: %v", filename, err)
+ return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
}
- blobSize = fileinfo.Size()
blobTime = fileinfo.ModTime()
+ blob = tarballBlob{
+ filename: filename,
+ size: fileinfo.Size(),
+ }
}
// Default to assuming the layer is compressed.
@@ -87,47 +102,34 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
uncompressed = nil
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- n, err := io.Copy(ioutil.Discard, reader)
- if err != nil {
- return nil, fmt.Errorf("error reading %q: %v", filename, err)
+ if _, err := io.Copy(io.Discard, reader); err != nil {
+ return nil, fmt.Errorf("error reading %q: %w", filename, err)
}
if uncompressed != nil {
uncompressed.Close()
}
// Grab our uncompressed and possibly-compressed digests and sizes.
- filenames = append(filenames, filename)
- diffIDs = append(diffIDs, diffIDdigester.Digest())
- diffSizes = append(diffSizes, n)
- blobIDs = append(blobIDs, blobIDdigester.Digest())
- blobSizes = append(blobSizes, blobSize)
- blobTimes = append(blobTimes, blobTime)
- blobTypes = append(blobTypes, layerType)
- }
+ diffID := diffIDdigester.Digest()
+ blobID := blobIDdigester.Digest()
+ diffIDs = append(diffIDs, diffID)
+ blobs[blobID] = blob
- // Build the rootfs and history for the configuration blob.
- rootfs := imgspecv1.RootFS{
- Type: "layers",
- DiffIDs: diffIDs,
- }
- created := time.Time{}
- history := []imgspecv1.History{}
- // Pick up the layer comment from the configuration's history list, if one is set.
- comment := "imported from tarball"
- if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
- comment = r.config.History[0].Comment
- }
- for i := range diffIDs {
- createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator)
history = append(history, imgspecv1.History{
- Created: &blobTimes[i],
- CreatedBy: createdBy,
+ Created: &blobTime,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.
- if created.Before(blobTimes[i]) {
- created = blobTimes[i]
+ if created.Before(blobTime) {
+ created = blobTime
}
+
+ layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
+ Digest: blobID,
+ Size: blob.size,
+ MediaType: layerType,
+ })
}
// Pick up other defaults from the config in the reference.
@@ -141,63 +143,55 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
if config.OS == "" {
config.OS = runtime.GOOS
}
- config.RootFS = rootfs
+ config.RootFS = imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
config.History = history
// Encode and digest the image configuration blob.
configBytes, err := json.Marshal(&config)
if err != nil {
- return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
+ return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err)
}
configID := digest.Canonical.FromBytes(configBytes)
- configSize := int64(len(configBytes))
-
- // Populate a manifest with the configuration blob and the file as the single layer.
- layerDescriptors := []imgspecv1.Descriptor{}
- for i := range blobIDs {
- layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
- Digest: blobIDs[i],
- Size: blobSizes[i],
- MediaType: blobTypes[i],
- })
- }
- annotations := make(map[string]string)
- for k, v := range r.annotations {
- annotations[k] = v
+ blobs[configID] = tarballBlob{
+ contents: configBytes,
+ size: int64(len(configBytes)),
}
+
+ // Populate a manifest with the configuration blob and the layers.
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
},
Config: imgspecv1.Descriptor{
Digest: configID,
- Size: configSize,
+ Size: int64(len(configBytes)),
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
- Annotations: annotations,
+ Annotations: maps.Clone(r.annotations),
}
// Encode the manifest.
manifestBytes, err := json.Marshal(&manifest)
if err != nil {
- return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
+ return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err)
}
// Return the image.
src := &tarballImageSource{
- reference: *r,
- filenames: filenames,
- diffIDs: diffIDs,
- diffSizes: diffSizes,
- blobIDs: blobIDs,
- blobSizes: blobSizes,
- blobTypes: blobTypes,
- config: configBytes,
- configID: configID,
- configSize: configSize,
- manifest: manifestBytes,
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
+
+ reference: *r,
+ blobs: blobs,
+ manifest: manifestBytes,
}
+ src.Compat = impl.AddCompat(src)
return src, nil
}
@@ -206,34 +200,22 @@ func (is *tarballImageSource) Close() error {
return nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- // We should only be asked about things in the manifest. Maybe the configuration blob.
- if blobinfo.Digest == is.configID {
- return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
+ blob, ok := is.blobs[blobinfo.Digest]
+ if !ok {
+ return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
- // Maybe one of the layer blobs.
- for i := range is.blobIDs {
- if blobinfo.Digest == is.blobIDs[i] {
- // We want to read that layer: open the file or memory block and hand it back.
- if is.filenames[i] == "-" {
- return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
- }
- reader, err := os.Open(is.filenames[i])
- if err != nil {
- return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
- }
- return reader, is.blobSizes[i], nil
- }
+ if blob.contents != nil {
+ return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil
+ }
+ reader, err := os.Open(blob.filename)
+ if err != nil {
+ return nil, -1, err
}
- return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
+ return reader, blob.size, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
@@ -247,28 +229,6 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
- }
- return nil, nil
-}
-
func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
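
The rewritten image source replaces seven parallel slices with a single `blobs` map keyed by digest, where each entry lives either in memory (`contents != nil`) or on disk. A stdlib-only sketch of the resulting `GetBlob` lookup, using plain string keys in place of `digest.Digest`:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// blob mirrors tarballBlob: contents == nil means "read from filename".
type blob struct {
	contents []byte
	filename string
	size     int64
}

// getBlob mirrors the rewritten GetBlob: one map lookup instead of
// scanning parallel slices, then an in-memory or on-disk reader.
func getBlob(blobs map[string]blob, digest string) (io.ReadCloser, int64, error) {
	b, ok := blobs[digest]
	if !ok {
		return nil, -1, fmt.Errorf("no blob with digest %q found", digest)
	}
	if b.contents != nil {
		return io.NopCloser(bytes.NewReader(b.contents)), int64(len(b.contents)), nil
	}
	f, err := os.Open(b.filename)
	if err != nil {
		return nil, -1, err
	}
	return f, b.size, nil
}

func main() {
	blobs := map[string]blob{"sha256:x": {contents: []byte("layer data")}}
	r, n, err := getBlob(blobs, "sha256:x")
	fmt.Println(n, err) // 10 <nil>
	io.Copy(os.Stdout, r)
	r.Close()
}
```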
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index d407c657f..b33208a51 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -3,7 +3,7 @@ package tarball
import (
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"strings"
@@ -36,15 +36,15 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
filenames := strings.Split(reference, separator)
for _, filename := range filenames {
if filename == "-" {
- stdin, err = ioutil.ReadAll(os.Stdin)
+ stdin, err = io.ReadAll(os.Stdin)
if err != nil {
- return nil, fmt.Errorf("error buffering stdin: %v", err)
+ return nil, fmt.Errorf("error buffering stdin: %w", err)
}
continue
}
f, err := os.Open(filename)
if err != nil {
- return nil, fmt.Errorf("error opening %q: %v", filename, err)
+ return nil, fmt.Errorf("error opening %q: %w", filename, err)
}
f.Close()
}
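
With `io/ioutil` gone, buffering stdin for a `-` entry becomes `io.ReadAll(os.Stdin)`. A minimal sketch of the validation loop above; the colon separator matches the transport's, but the reference value is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// A "-" entry in the colon-separated reference means "buffer stdin";
	// other entries are only checked to be openable, as in ParseReference.
	for _, filename := range strings.Split("layer1.tar:-", ":") {
		if filename == "-" {
			stdin, err := io.ReadAll(os.Stdin) // was ioutil.ReadAll
			if err != nil {
				fmt.Fprintf(os.Stderr, "error buffering stdin: %v\n", err)
				os.Exit(1)
			}
			fmt.Printf("buffered %d bytes from stdin\n", len(stdin))
			continue
		}
		f, err := os.Open(filename)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error opening %q: %v\n", filename, err)
			os.Exit(1)
		}
		f.Close()
	}
}
```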
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index 2110a091d..0f4b7e329 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -1,10 +1,14 @@
package alltransports
import (
+ "fmt"
"strings"
- // register all known transports
- // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+
+ // Register all known transports.
+ // NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating
// a transport.
_ "github.com/containers/image/v5/directory"
_ "github.com/containers/image/v5/docker"
@@ -12,35 +16,34 @@ import (
_ "github.com/containers/image/v5/oci/archive"
_ "github.com/containers/image/v5/oci/layout"
_ "github.com/containers/image/v5/openshift"
+ _ "github.com/containers/image/v5/sif"
_ "github.com/containers/image/v5/tarball"
+ // The docker-daemon transport is registered by docker_daemon*.go
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
- "github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
- parts := strings.SplitN(imgName, ":", 2)
- if len(parts) != 2 {
- return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+ transportName, withinTransport, valid := strings.Cut(imgName, ":")
+ if !valid {
+ return nil, fmt.Errorf(`Invalid image name %q, expected colon-separated transport:reference`, imgName)
}
- transport := transports.Get(parts[0])
+ transport := transports.Get(transportName)
if transport == nil {
- return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+ return nil, fmt.Errorf(`Invalid image name %q, unknown transport %q`, imgName, transportName)
}
- return transport.ParseReference(parts[1])
+ return transport.ParseReference(withinTransport)
}
// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
// Keep this in sync with ParseImageName!
- parts := strings.SplitN(imageName, ":", 2)
- if len(parts) == 2 {
- return transports.Get(parts[0])
+ transportName, _, valid := strings.Cut(imageName, ":")
+ if valid {
+ return transports.Get(transportName)
}
return nil
}
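
`ParseImageName` and `TransportFromImageName` now share the same `strings.Cut` split; the reported bool distinguishes "no colon at all" from an empty remainder, which `SplitN` could not express directly. A sketch (the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// splitImageName mirrors the shared split: cut at the first colon and
// treat a missing separator as an invalid image name.
func splitImageName(imgName string) (transport, withinTransport string, err error) {
	transportName, rest, ok := strings.Cut(imgName, ":")
	if !ok {
		return "", "", fmt.Errorf(`invalid image name %q, expected colon-separated transport:reference`, imgName)
	}
	return transportName, rest, nil
}

func main() {
	fmt.Println(splitImageName("docker://docker.io/library/busybox:latest"))
	// docker //docker.io/library/busybox:latest <nil>
	fmt.Println(splitImageName("no-colon")) // returns the "invalid image name" error
}
```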
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
index 82224052e..ffac6e0b8 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_docker_daemon_stub
// +build !containers_image_docker_daemon_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
index d13700799..ddc347bf3 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
@@ -1,3 +1,4 @@
+//go:build containers_image_docker_daemon_stub
// +build containers_image_docker_daemon_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
index 72432d1ef..2340702bd 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree && linux
// +build containers_image_ostree,linux
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
index f4a862bd4..8c4175188 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_ostree || !linux
// +build !containers_image_ostree !linux
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
index 7041eb876..1e399cdb0 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
index 67f0291cc..30802661f 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
@@ -1,3 +1,4 @@
+//go:build containers_image_storage_stub
// +build containers_image_storage_stub
package alltransports
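
All of the stub files above gain the Go 1.17 `//go:build` form while keeping the legacy `// +build` line for older toolchains. The two must agree, and `gofmt` keeps them in sync; the pattern, shown on the storage stub's constraint:

```go
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub

// A file guarded this way is compiled only when the tag is NOT set;
// building with "go build -tags containers_image_storage_stub" excludes
// it and compiles the matching *_stub.go file instead.
package alltransports
```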
diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
index 46ee3710f..834f33b48 100644
--- a/vendor/github.com/containers/image/v5/transports/transports.go
+++ b/vendor/github.com/containers/image/v5/transports/transports.go
@@ -5,6 +5,7 @@ import (
"sort"
"sync"
+ "github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
)
@@ -66,22 +67,21 @@ func Register(t types.ImageTransport) {
// This is the generally recommended way to refer to images in the UI.
//
// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
+var deprecatedTransports = set.NewWithValues("atomic")
+
// ListNames returns a list of non deprecated transport names.
// Deprecated transports can be used, but are not presented to users.
func ListNames() []string {
kt.mu.Lock()
defer kt.mu.Unlock()
- deprecated := map[string]bool{
- "atomic": true,
- }
var names []string
for _, transport := range kt.transports {
- if !deprecated[transport.Name()] {
+ if !deprecatedTransports.Contains(transport.Name()) {
names = append(names, transport.Name())
}
}
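
`transports.go` now keeps the deprecated transport names in a set from c/image's internal `set` package, which is not importable outside the module. A hedged stand-in using a plain map shows the same `Contains` filtering that `ListNames` performs:

```go
package main

import "fmt"

// stringSet is a tiny stand-in for c/image's internal set package.
type stringSet map[string]struct{}

func newWithValues(vals ...string) stringSet {
	s := stringSet{}
	for _, v := range vals {
		s[v] = struct{}{}
	}
	return s
}

func (s stringSet) Contains(v string) bool { _, ok := s[v]; return ok }

var deprecatedTransports = newWithValues("atomic")

func main() {
	// Deprecated transports still work, but are filtered from listings.
	for _, name := range []string{"atomic", "docker", "oci"} {
		if !deprecatedTransports.Contains(name) {
			fmt.Println(name)
		}
	}
	// docker
	// oci
}
```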
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 3e7abd34d..9a7a0da2b 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -11,7 +11,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
-// ImageTransport is a top-level namespace for ways to to store/load an image.
+// ImageTransport is a top-level namespace for ways to store/load an image.
// It should generally correspond to ImageSource/ImageDestination implementations.
//
// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
@@ -48,7 +48,7 @@ type ImageReference interface {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
- // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+ // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
StringWithinTransport() string
@@ -125,13 +125,20 @@ type BlobInfo struct {
URLs []string
Annotations map[string]string
MediaType string
+
+ // NOTE: The following fields contain desired _edits_ to blob infos.
+ // Conceptually they don't belong in the BlobInfo object at all;
+ // the edits should be provided specifically as parameters to the edit implementation.
+ // We can’t remove the fields without breaking compatibility, but don’t
+ // add any more.
+
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer's "compressed or not" should be preserved,
// possibly while changing the compression algorithm from one to another,
- // or if it should be compressed or decompressed. The field defaults to
- // preserve the original layer's compressedness.
+ // or if it should be changed to compressed or decompressed.
+ // The field defaults to preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
- // field out out of BlobInfo.
+ // field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
@@ -142,8 +149,9 @@ type BlobInfo struct {
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer was encrypted/decrypted
// TODO: To remove together with CompressionOperation in re-design to
- // remove field out out of BlobInfo.
+ // remove field out of BlobInfo.
CryptoOperation LayerCrypto
+ // Before adding any fields to this struct, read the NOTE above.
}
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
@@ -177,24 +185,25 @@ type BICReplacementCandidate struct {
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
-// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
-// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
-// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
//
-// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
-// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-// compress/decompress blobs for their own purposes.
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
//
-// - Known blob locations, managed by individual transports:
-// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-// recording transport-specific information that allows the transport to reuse the blob in the future;
-// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
-// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
-// can be directly reused within a registry, or mounted across registries within a registry server.)
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
@@ -299,7 +308,7 @@ type ImageDestination interface {
IgnoresEmbeddedDockerReference() bool
// PutBlob writes contents of stream and returns data representing the result.
- // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
@@ -334,6 +343,9 @@ type ImageDestination interface {
// MUST be called after PutManifest (signatures may reference manifest contents).
PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+ // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+ // original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
@@ -433,7 +445,7 @@ type ImageCloser interface {
Close() error
}
-// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
type ManifestUpdateOptions struct {
LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
EmbeddedDockerReference reference.Named
@@ -445,7 +457,7 @@ type ManifestUpdateOptions struct {
// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
// only to make writing struct literals possible.
type ManifestUpdateInformation struct {
- Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+ Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
}
@@ -462,7 +474,17 @@ type ImageInspectInfo struct {
Variant string
Os string
Layers []string
+ LayersData []ImageInspectLayer
Env []string
+ Author string
+}
+
+// ImageInspectLayer is a set of metadata describing an image layer's details
+type ImageInspectLayer struct {
+ MIMEType string // "" if unknown.
+ Digest digest.Digest
+ Size int64 // -1 if unknown.
+ Annotations map[string]string
}
// DockerAuthConfig contains authorization information for connecting to a registry.
@@ -558,15 +580,24 @@ type SystemContext struct {
UserShortNameAliasConfPath string
// If set, short-name resolution in pkg/shortnames must follow the specified mode
ShortNameMode *ShortNameMode
- // If not "", overrides the default path for the authentication file, but only new format files
+ // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
+ // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
+ // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
+ // specific context.
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
+ // If not "", overrides the default path for the registry authentication file, but only new format files
AuthFilePath string
- // if not "", overrides the default path for the authentication file, but with the legacy format;
+ // if not "", overrides the default path for the registry authentication file, but with the legacy format;
// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
// in locations other than the home dir; openshift components should then set this field in those cases;
// this field is ignored if `AuthFilePath` is set (we favor the newer format);
// only reading of this data is supported;
LegacyFormatAuthFilePath string
+ // If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
+ // This must not be set if AuthFilePath is set.
+ // Only credentials and credential helpers in this file are processed, not any other configuration in this file.
+ DockerCompatAuthFilePath string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
@@ -595,12 +626,12 @@ type SystemContext struct {
// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
// a client certificate (ending with ".cert") and a client certificate key
- // (ending with ".key") used when talking to a Docker Registry.
+ // (ending with ".key") used when talking to a container registry.
DockerCertPath string
// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
// Ignored if DockerCertPath is non-empty.
DockerPerHostCertDirPath string
- // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ // Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
// Ignored if DockerBearerRegistryToken is non-empty.
@@ -612,6 +643,7 @@ type SystemContext struct {
// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
// Note that this field is used mainly to integrate containers/image into projectatomic/docker
// in order to not break any existing docker's integration tests.
+ // Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect.
DockerDisableV1Ping bool
// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
DockerDisableDestSchema1MIMETypes bool
@@ -619,6 +651,10 @@ type SystemContext struct {
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
+ // If true, all blobs will have precomputed digests to ensure layers that already exist on the registry are not uploaded.
+ // Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
+ // when the digest for a blob is unknown.
+ DockerRegistryPushPrecomputeDigests bool
// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),
@@ -633,6 +669,8 @@ type SystemContext struct {
// === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true
DirForceCompress bool
+ // DirForceDecompress decompresses the image layers if set to true
+ DirForceDecompress bool
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
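
Nearly every hunk in this vendor bump swaps `github.com/pkg/errors` for stdlib wrapping with `%w`, which keeps sentinels such as `ErrInvalidReference` reachable through `errors.Is`. A minimal sketch of the pattern:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrInvalidReference = errors.New("invalid reference")

func parse(ref string) error {
	if ref == "" {
		// fmt.Errorf with %w replaces errors.Wrapf: the sentinel stays
		// reachable through the wrap chain.
		return fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
	}
	return nil
}

func main() {
	err := parse("")
	fmt.Println(err)                                 // "" is an empty reference: invalid reference
	fmt.Println(errors.Is(err, ErrInvalidReference)) // true
}
```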
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 4afb3b90b..3743721fc 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 12
+ VersionMinor = 33
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..a7d8acbfc
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The libtrust Project Community Code of Conduct
+
+The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md
new file mode 100644
index 000000000..966f4f053
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the libtrust Project
+
+The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/.gitignore b/vendor/github.com/containers/ocicrypt/.gitignore
new file mode 100644
index 000000000..b25c15b81
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml
new file mode 100644
index 000000000..bf39af836
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.golangci.yml
@@ -0,0 +1,35 @@
+linters:
+ enable:
+ - depguard
+ - staticcheck
+ - unconvert
+ - gofmt
+ - goimports
+ - revive
+ - ineffassign
+ - govet
+ - unused
+ - misspell
+
+linters-settings:
+ depguard:
+ rules:
+ main:
+ files:
+ - $all
+ deny:
+ - pkg: "io/ioutil"
+
+ revive:
+ severity: error
+ rules:
+ - name: indent-error-flow
+ severity: warning
+ disabled: false
+
+ - name: error-strings
+ disabled: false
+
+ staticcheck:
+ # Suppress reports of deprecated packages
+ checks: ["-SA1019"]
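
The new `depguard` rule bans `io/ioutil`, matching the mechanical replacements made throughout this diff. The one-to-one stdlib equivalents, for reference:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// ioutil.ReadAll  -> io.ReadAll
	data, _ := io.ReadAll(bytes.NewReader([]byte("hello")))
	// ioutil.NopCloser -> io.NopCloser
	rc := io.NopCloser(bytes.NewReader(data))
	defer rc.Close()
	// ioutil.Discard  -> io.Discard
	io.Copy(io.Discard, rc)
	// ioutil.ReadFile -> os.ReadFile (and ioutil.WriteFile -> os.WriteFile)
	if _, err := os.ReadFile("/nonexistent"); err != nil {
		fmt.Println("os.ReadFile reports:", err)
	}
}
```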
diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml
deleted file mode 100644
index e4dd4a402..000000000
--- a/vendor/github.com/containers/ocicrypt/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-dist: bionic
-language: go
-
-os:
-- linux
-
-go:
- - "1.13.x"
- - "1.16.x"
-
-matrix:
- include:
- - os: linux
-
-addons:
- apt:
- packages:
- - gnutls-bin
- - softhsm2
-
-go_import_path: github.com/containers/ocicrypt
-
-install:
- - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
-
-script:
- - make
- - make check
- - make test
diff --git a/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
new file mode 100644
index 000000000..fa4b03bb8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
@@ -0,0 +1,10 @@
+Below is a list of adopters of the `ocicrypt` library and of projects that support the use of OCI encrypted images:
+- [skopeo](https://github.com/containers/skopeo)
+- [buildah](https://github.com/containers/buildah)
+- [containerd](https://github.com/containerd/imgcrypt)
+- [nerdctl](https://github.com/containerd/nerdctl)
+- [distribution](https://github.com/distribution/distribution)
+
+Below is a list of projects that are in the process of adopting support:
+- [quay](https://github.com/quay/quay)
+- [kata-containers](https://github.com/kata-containers/kata-containers)
diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
index 5131b5a37..d68f8dbda 100644
--- a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
+++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
@@ -1,3 +1,3 @@
## The OCIcrypt Library Project Community Code of Conduct
-The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS
index e6a7d1f0a..af38d03bf 100644
--- a/vendor/github.com/containers/ocicrypt/MAINTAINERS
+++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -3,3 +3,4 @@
# Github ID, Name, Email Address
lumjjb, Brandon Lum, lumjjb@gmail.com
stefanberger, Stefan Berger, stefanb@linux.ibm.com
+arronwy, Arron Wang, arron.wang@intel.com
diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile
index dc9d98537..97ddeefbb 100644
--- a/vendor/github.com/containers/ocicrypt/Makefile
+++ b/vendor/github.com/containers/ocicrypt/Makefile
@@ -28,6 +28,7 @@ vendor:
go mod tidy
test:
+ go clean -testcache
go test ./... -test.v
generate-protobuf:
diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md
index 84cab7a40..b69d14e3b 100644
--- a/vendor/github.com/containers/ocicrypt/README.md
+++ b/vendor/github.com/containers/ocicrypt/README.md
@@ -34,6 +34,12 @@ The implementation for both symmetric and asymmetric encryption used in this lib
We note that adding interfaces here outside the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+#### Keyprovider interface
+
+As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service.
+
+
## Security Issues
We consider security issues related to this library critical. Please report any security related issues by emailing the maintainers listed in the [MAINTAINERS](MAINTAINERS) file.
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
index 30124c89f..ea98cb129 100644
--- a/vendor/github.com/containers/ocicrypt/SECURITY.md
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -1,3 +1,3 @@
## Security and Disclosure Information Policy for the OCIcrypt Library Project
-The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
index da403d95d..b8436a8d5 100644
--- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -17,10 +17,11 @@
package blockcipher
import (
+ "errors"
+ "fmt"
"io"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// LayerCipherType is the ciphertype as specified in the layer metadata
@@ -95,9 +96,8 @@ func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
return v, ok
} else if v, ok := lbco.Private.CipherOptions[key]; ok {
return v, ok
- } else {
- return nil, false
}
+ return nil, false
}
func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
@@ -129,7 +129,7 @@ func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCi
}
return encDataReader, fin, err
}
- return nil, nil, errors.Errorf("unsupported cipher type: %s", typ)
+ return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ)
}
// Decrypt is the handler for the layer decryption routine
@@ -138,10 +138,10 @@ func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBloc
if typ == "" {
return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
}
- if c, ok := h.cipherMap[LayerCipherType(typ)]; ok {
+ if c, ok := h.cipherMap[typ]; ok {
return c.Decrypt(encDataReader, opt)
}
- return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ)
+ return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ)
}
// NewLayerBlockCipherHandler returns a new default handler
@@ -153,7 +153,7 @@ func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
var err error
h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
if err != nil {
- return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR")
+ return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err)
}
return &h, nil
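
The block cipher handler above wires up AES-256-CTR (paired with a separate HMAC for integrity, not shown). A hedged, stdlib-only sketch of just the CTR stream portion, generating a random key and nonce the way the `init` path does:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

func main() {
	key := make([]byte, 32)             // AES-256
	nonce := make([]byte, aes.BlockSize) // 16-byte CTR nonce
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		panic(fmt.Errorf("unable to generate random nonce: %w", err))
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(fmt.Errorf("aes.NewCipher failed: %w", err))
	}

	plaintext := []byte("layer bytes")
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCTR(block, nonce).XORKeyStream(ciphertext, plaintext)

	// CTR is symmetric: the same keystream decrypts.
	decrypted := make([]byte, len(ciphertext))
	cipher.NewCTR(block, nonce).XORKeyStream(decrypted, ciphertext)
	fmt.Println(string(decrypted)) // layer bytes
}
```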
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
index 095a53e35..7db03e2ec 100644
--- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -22,12 +22,12 @@ import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
+ "errors"
"fmt"
"hash"
"io"
"github.com/containers/ocicrypt/utils"
- "github.com/pkg/errors"
)
// AESCTRLayerBlockCipher implements the AES CTR stream cipher
@@ -74,7 +74,7 @@ func (r *aesctrcryptor) Read(p []byte) (int, error) {
if !r.bc.encrypt {
if _, err := r.bc.hmac.Write(p[:o]); err != nil {
- r.bc.err = errors.Wrapf(err, "could not write to hmac")
+ r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
return 0, r.bc.err
}
@@ -92,7 +92,7 @@ func (r *aesctrcryptor) Read(p []byte) (int, error) {
if r.bc.encrypt {
if _, err := r.bc.hmac.Write(p[:o]); err != nil {
- r.bc.err = errors.Wrapf(err, "could not write to hmac")
+ r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
return 0, r.bc.err
}
@@ -120,13 +120,13 @@ func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts Laye
if !ok {
nonce = make([]byte, aes.BlockSize)
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
- return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce")
+ return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err)
}
}
block, err := aes.NewCipher(key)
if err != nil {
- return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed")
+ return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err)
}
bc.reader = reader
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
index a789d052d..f7f29cd8d 100644
--- a/vendor/github.com/containers/ocicrypt/config/constructors.go
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -17,11 +17,12 @@
package config
import (
- "github.com/containers/ocicrypt/crypto/pkcs11"
+ "errors"
+ "fmt"
"strings"
- "github.com/pkg/errors"
- "gopkg.in/yaml.v2"
+ "github.com/containers/ocicrypt/crypto/pkcs11"
+ "gopkg.in/yaml.v3"
)
// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
@@ -85,7 +86,7 @@ func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Y
}
p11confYaml, err := yaml.Marshal(pkcs11Config)
if err != nil {
- return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml")
+ return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err)
}
dc = DecryptConfig{
@@ -223,7 +224,7 @@ func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig
func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) {
p11confYaml, err := yaml.Marshal(pkcs11Config)
if err != nil {
- return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml")
+ return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err)
}
dc := DecryptConfig{
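
The yaml.v2 to yaml.v3 switch is import-path-only for these call sites; Marshal has the same signature. A tiny sketch with a hypothetical stand-in type (the real one here is pkcs11.Pkcs11Config):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // moduleConfig is a made-up stand-in for pkcs11.Pkcs11Config
    type moduleConfig struct {
        ModulePaths []string `yaml:"module-paths"`
    }

    func main() {
        out, err := yaml.Marshal(moduleConfig{ModulePaths: []string{"/usr/lib/softhsm/"}})
        if err != nil {
            panic(fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err))
        }
        fmt.Print(string(out))
    }
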
diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
index b454b3716..4785a831b 100644
--- a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
+++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
@@ -18,8 +18,7 @@ package config
import (
"encoding/json"
- "github.com/pkg/errors"
- "io/ioutil"
+ "fmt"
"os"
)
@@ -52,7 +51,7 @@ func parseConfigFile(filename string) (*OcicryptConfig, error) {
return nil, nil
}
- data, err := ioutil.ReadFile(filename)
+ data, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
@@ -72,7 +71,7 @@ func GetConfiguration() (*OcicryptConfig, error) {
if len(filename) > 0 {
ic, err = parseConfigFile(filename)
if err != nil {
- return nil, errors.Wrap(err, "Error while parsing keyprovider config file")
+ return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err)
}
} else {
return nil, nil
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
index 7fcd2e3af..072d7fe18 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -15,9 +15,9 @@ package pkcs11
import (
"fmt"
- "github.com/pkg/errors"
+
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
@@ -42,7 +42,7 @@ func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
p11uri := pkcs11uri.New()
err := p11uri.Parse(uri)
if err != nil {
- return nil, errors.Wrapf(err, "Could not parse Pkcs11URI from file")
+ return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err)
}
return p11uri, err
}
@@ -50,14 +50,14 @@ func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
// The file has the following yaml format:
// pkcs11:
-// - uri : <pkcs11 uri>
+//   - uri : <pkcs11 uri>
// An error is returned if the pkcs11 URI is malformed
func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
p11keyfile := Pkcs11KeyFile{}
- err := yaml.Unmarshal([]byte(yamlstr), &p11keyfile)
+ err := yaml.Unmarshal(yamlstr, &p11keyfile)
if err != nil {
- return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile")
+ return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err)
}
p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
@@ -102,7 +102,7 @@ func GetDefaultModuleDirectories() []string {
"/usr/lib/softhsm/", // Debian,Ubuntu
}
- // Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/
+ // Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
hosttype, ostype, q := getHostAndOsType()
if len(hosttype) > 0 {
dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
@@ -126,9 +126,9 @@ func GetDefaultModuleDirectoriesYaml(indent string) string {
func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
p11conf := Pkcs11Config{}
- err := yaml.Unmarshal([]byte(yamlstr), &p11conf)
+ err := yaml.Unmarshal(yamlstr, &p11conf)
if err != nil {
- return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config")
+ return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err)
}
return &p11conf, nil
}
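
For reference, a sketch of feeding ParsePkcs11KeyFile the documented YAML shape; the token name, object label, module path, and PIN in the URI are made up for illustration:

    package main

    import (
        "fmt"

        "github.com/containers/ocicrypt/crypto/pkcs11"
    )

    func main() {
        // illustrative key file content; real deployments point module-path
        // at their own PKCS#11 shared library
        yamlstr := []byte("pkcs11:\n  uri: pkcs11:token=mytoken;object=mykey?module-path=/usr/lib/softhsm/libsofthsm2.so&pin-value=1234\n")
        keyfile, err := pkcs11.ParsePkcs11KeyFile(yamlstr)
        if err != nil {
            panic(err)
        }
        fmt.Println(keyfile.Uri.GetPathAttribute("token", false))
    }
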
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
index 448e88c7c..fe047a1e6 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
@@ -1,3 +1,4 @@
+//go:build cgo
// +build cgo
/*
@@ -25,6 +26,7 @@ import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"hash"
"net/url"
@@ -33,15 +35,12 @@ import (
"strings"
"github.com/miekg/pkcs11"
- "github.com/pkg/errors"
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
)
var (
// OAEPLabel defines the label we use for OAEP encryption; this cannot be changed
OAEPLabel = []byte("")
- // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed
- OAEPDefaultHash = "sha1"
// OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM
OAEPSha1Params = &pkcs11.OAEPParams{
@@ -69,20 +68,20 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri
)
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // The default is 'sha1'
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
hashfunc = sha1.New()
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
hashfunc = sha256.New()
hashalg = "sha256"
default:
- return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash)
+ return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
}
ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
if err != nil {
- return nil, "", errors.Wrapf(err, "rss.EncryptOAEP failed")
+ return nil, "", fmt.Errorf("rss.EncryptOAEP failed: %w", err)
}
return ciphertext, hashalg, nil
@@ -106,7 +105,7 @@ func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperatio
module, err := p11uri.GetModule()
if err != nil {
- return "", "", 0, errors.Wrap(err, "No module available in pkcs11 URI")
+ return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err)
}
slotid := int64(-1)
@@ -115,7 +114,7 @@ func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperatio
if ok {
slotid, err = strconv.ParseInt(slot, 10, 64)
if err != nil {
- return "", "", 0, errors.Wrap(err, "slot-id is not a valid number")
+ return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err)
}
if slotid < 0 {
return "", "", 0, fmt.Errorf("slot-id is a negative number")
@@ -140,21 +139,21 @@ func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, err
// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
- session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+ session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
if err != nil {
- return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid)
+ return 0, fmt.Errorf("OpenSession to slot %d failed: %w", slotid, err)
}
if len(pin) > 0 {
err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
if err != nil {
_ = p11ctx.CloseSession(session)
- return 0, errors.Wrap(err, "Could not login to device")
+ return 0, fmt.Errorf("Could not login to device: %w", err)
}
}
return session, nil
}
-// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (share libary) and to get
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
// one slot after the other will be attempted and the first one where login succeeds will be used
func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session pkcs11.SessionHandle, err error) {
@@ -172,40 +171,40 @@ func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
if err != nil {
p11Err := err.(pkcs11.Error)
if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED {
- return nil, 0, errors.Wrap(err, "Initialize failed")
+ return nil, 0, fmt.Errorf("Initialize failed: %w", err)
}
}
if slotid >= 0 {
session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin)
return p11ctx, session, err
- } else {
- slots, err := p11ctx.GetSlotList(true)
- if err != nil {
- return nil, 0, errors.Wrap(err, "GetSlotList failed")
- }
+ }
- tokenlabel, ok := p11uri.GetPathAttribute("token", false)
- if !ok {
- return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given")
- }
+ slots, err := p11ctx.GetSlotList(true)
+ if err != nil {
+ return nil, 0, fmt.Errorf("GetSlotList failed: %w", err)
+ }
- for _, slot := range slots {
- ti, err := p11ctx.GetTokenInfo(slot)
- if err != nil || ti.Label != tokenlabel {
- continue
- }
+ tokenlabel, ok := p11uri.GetPathAttribute("token", false)
+ if !ok {
+ return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given")
+ }
- session, err = pkcs11OpenSession(p11ctx, slot, pin)
- if err == nil {
- return p11ctx, session, err
- }
+ for _, slot := range slots {
+ ti, err := p11ctx.GetTokenInfo(slot)
+ if err != nil || ti.Label != tokenlabel {
+ continue
}
- if len(pin) > 0 {
- return nil, 0, errors.New("Could not create session to any slot and/or log in")
+
+ session, err = pkcs11OpenSession(p11ctx, slot, pin)
+ if err == nil {
+ return p11ctx, session, err
}
- return nil, 0, errors.New("Could not create session to any slot")
}
+ if len(pin) > 0 {
+ return nil, 0, errors.New("Could not create session to any slot and/or log in")
+ }
+ return nil, 0, errors.New("Could not create session to any slot")
}
func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) {
@@ -235,24 +234,24 @@ func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, ke
}
if err := p11ctx.FindObjectsInit(session, template); err != nil {
- return 0, errors.Wrap(err, "FindObjectsInit failed")
+ return 0, fmt.Errorf("FindObjectsInit failed: %w", err)
}
obj, _, err := p11ctx.FindObjects(session, 100)
if err != nil {
- return 0, errors.Wrap(err, "FindObjects failed")
+ return 0, fmt.Errorf("FindObjects failed: %w", err)
}
if err := p11ctx.FindObjectsFinal(session); err != nil {
- return 0, errors.Wrap(err, "FindObjectsFinal failed")
+ return 0, fmt.Errorf("FindObjectsFinal failed: %w", err)
}
if len(obj) > 1 {
- return 0, errors.Errorf("There are too many (=%d) keys with %s", len(obj), msg)
+ return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg)
} else if len(obj) == 1 {
return obj[0], nil
}
- return 0, errors.Errorf("Could not find any object with %s", msg)
+ return 0, fmt.Errorf("Could not find any object with %s", msg)
}
// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext
@@ -283,26 +282,26 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s
var oaep *pkcs11.OAEPParams
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // the default is sha1
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
oaep = OAEPSha1Params
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
oaep = OAEPSha256Params
hashalg = "sha256"
default:
- return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash)
+ return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
}
err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey)
if err != nil {
- return nil, "", errors.Wrap(err, "EncryptInit error")
+ return nil, "", fmt.Errorf("EncryptInit error: %w", err)
}
ciphertext, err := p11ctx.Encrypt(session, plaintext)
if err != nil {
- return nil, "", errors.Wrap(err, "Encrypt failed")
+ return nil, "", fmt.Errorf("Encrypt failed: %w", err)
}
return ciphertext, hashalg, nil
}
@@ -333,23 +332,23 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash
var oaep *pkcs11.OAEPParams
- // the default is sha1
+ // An empty string from the Hash in the JSON historically defaults to sha1.
switch hashalg {
case "sha1", "":
oaep = OAEPSha1Params
case "sha256":
oaep = OAEPSha256Params
default:
- return nil, errors.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
+ return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
}
err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey)
if err != nil {
- return nil, errors.Wrapf(err, "DecryptInit failed")
+ return nil, fmt.Errorf("DecryptInit failed: %w", err)
}
plaintext, err := p11ctx.Decrypt(session, ciphertext)
if err != nil {
- return nil, errors.Wrapf(err, "Decrypt failed")
+ return nil, fmt.Errorf("Decrypt failed: %w", err)
}
return plaintext, err
}
@@ -375,19 +374,19 @@ type Pkcs11Recipient struct {
// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the
// following format:
// {
-// recipients: [ // recipient list
-// {
-// "version": 0,
-// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha1'>
-// } ,
-// {
-// "version": 0,
-// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha1'>
-// } ,
-// [...]
-// ]
+// recipients: [ // recipient list
+// {
+// "version": 0,
+// "blob": <base64 encoded RSA OAEP encrypted blob>,
+// "hash": <hash used for OAEP other than 'sha1'>
+// } ,
+// {
+// "version": 0,
+// "blob": <base64 encoded RSA OAEP encrypted blob>,
+// "hash": <hash used for OAEP other than 'sha1'>
+// } ,
+// [...]
+// ]
// }
func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
var (
@@ -404,15 +403,12 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
case *Pkcs11KeyFileObject:
ciphertext, hashalg, err = publicEncryptOAEP(pkey, data)
default:
- err = errors.Errorf("Unsupported key object type for pkcs11 public key")
+ err = fmt.Errorf("Unsupported key object type for pkcs11 public key")
}
if err != nil {
return nil, err
}
- if hashalg == OAEPDefaultHash {
- hashalg = ""
- }
recipient := Pkcs11Recipient{
Version: 0,
Blob: base64.StdEncoding.EncodeToString(ciphertext),
@@ -427,30 +423,32 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key.
// The input pkcs11blobstr is a string with the following format:
// {
-// recipients: [ // recipient list
-// {
-// "version": 0,
-// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha1'>
-// } ,
-// {
-// "version": 0,
-// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha1'>
-// } ,
-// [...]
+// recipients: [ // recipient list
+// {
+// "version": 0,
+// "blob": <base64 encoded RSA OAEP encrypted blob>,
+// "hash": <hash used for OAEP other than 'sha1'>
+// } ,
+// {
+// "version": 0,
+// "blob": <base64 encoded RSA OAEP encrypted blob>,
+// "hash": <hash used for OAEP other than 'sha1'>
+// } ,
+// [...]
// }
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
pkcs11blob := Pkcs11Blob{}
err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
if err != nil {
- return nil, errors.Wrapf(err, "Could not parse Pkcs11Blob")
+ return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err)
}
switch pkcs11blob.Version {
case 0:
// latest supported version
default:
- return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version)
+ return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version)
}
// since we do trial and error, collect all encountered errors
errs := ""
@@ -460,7 +458,7 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte,
case 0:
// last supported version
default:
- return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version)
+ return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version)
}
ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
@@ -483,5 +481,5 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte,
}
}
- return nil, errors.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
+ return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
}
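
The behavioral change buried in this file is the OAEP default: an unset OCICRYPT_OAEP_HASHALG now selects sha256, and sha1 must be requested explicitly (for interoperability with blobs produced by older versions). The selection logic, extracted into a runnable sketch:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func oaepHash() (string, error) {
        switch strings.ToLower(os.Getenv("OCICRYPT_OAEP_HASHALG")) {
        case "sha1": // must now be opted into, for older blobs
            return "sha1", nil
        case "sha256", "": // empty means the new default
            return "sha256", nil
        default:
            return "", fmt.Errorf("Unsupported OAEP hash")
        }
    }

    func main() {
        h, _ := oaepHash()
        fmt.Println(h) // "sha256" when the variable is unset
    }
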
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
index 6edf75269..6cf0aa2a9 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
@@ -1,3 +1,4 @@
+//go:build !cgo
// +build !cgo
/*
@@ -18,14 +19,12 @@
package pkcs11
-import (
- "github.com/pkg/errors"
-)
+import "fmt"
func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
- return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build")
+ return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build")
}
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
- return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build")
+ return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build")
}
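
The //go:build lines added to both pkcs11helpers files follow the Go 1.17 convention: the new constraint syntax sits directly above the legacy // +build line, both must agree, and gofmt rewrites the legacy line to match the //go:build expression. The header shape, for reference (a file fragment, not a runnable program):

    //go:build !cgo
    // +build !cgo

    // a blank line must separate the constraints from the package clause
    package pkcs11
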
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
index 306e372d5..231da2317 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
@@ -17,12 +17,11 @@
package pkcs11
import (
+ "fmt"
"os"
"runtime"
"strings"
"sync"
-
- "github.com/pkg/errors"
)
var (
@@ -45,7 +44,7 @@ func setEnvVars(env map[string]string) ([]string, error) {
err := os.Setenv(k, v)
if err != nil {
restoreEnv(oldenv)
- return nil, errors.Wrapf(err, "Could not set environment variable '%s' to '%s'", k, v)
+ return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err)
}
}
@@ -106,6 +105,8 @@ func getHostAndOsType() (string, string, string) {
ht = "x86_64"
case "ppc64le":
ht = "powerpc64le"
+ case "riscv64":
+ ht = "riscv64"
case "s390x":
ht = "s390x"
}
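
A sketch of the GOARCH-to-Debian-multiarch mapping that gains the riscv64 case above; the probed directory has the form /usr/lib/<triplet>-linux-gnu/. The mapping table is reproduced here for illustration only:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        ht := map[string]string{
            "arm64":   "aarch64",
            "arm":     "arm",
            "amd64":   "x86_64",
            "ppc64le": "powerpc64le",
            "riscv64": "riscv64", // the newly added case
            "s390x":   "s390x",
        }[runtime.GOARCH]
        if ht != "" {
            fmt.Printf("/usr/lib/%s-linux-gnu/\n", ht)
        }
    }
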
diff --git a/vendor/github.com/containers/ocicrypt/encryption.go b/vendor/github.com/containers/ocicrypt/encryption.go
index f5142cc8d..b6fa9db40 100644
--- a/vendor/github.com/containers/ocicrypt/encryption.go
+++ b/vendor/github.com/containers/ocicrypt/encryption.go
@@ -19,23 +19,23 @@ package ocicrypt
import (
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
- keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
- "github.com/containers/ocicrypt/keywrap/keyprovider"
"io"
"strings"
"github.com/containers/ocicrypt/blockcipher"
"github.com/containers/ocicrypt/config"
+ keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/keywrap/jwe"
+ "github.com/containers/ocicrypt/keywrap/keyprovider"
"github.com/containers/ocicrypt/keywrap/pgp"
"github.com/containers/ocicrypt/keywrap/pkcs11"
"github.com/containers/ocicrypt/keywrap/pkcs7"
"github.com/opencontainers/go-digest"
- log "github.com/sirupsen/logrus"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
)
// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
@@ -133,16 +133,19 @@ func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, des
}
privOptsData, err = json.Marshal(opts.Private)
if err != nil {
- return nil, errors.Wrapf(err, "could not JSON marshal opts")
+ return nil, fmt.Errorf("could not JSON marshal opts: %w", err)
}
pubOptsData, err = json.Marshal(opts.Public)
if err != nil {
- return nil, errors.Wrapf(err, "could not JSON marshal opts")
+ return nil, fmt.Errorf("could not JSON marshal opts: %w", err)
}
}
newAnnotations := make(map[string]string)
keysWrapped := false
+ if len(keyWrapperAnnotations) == 0 {
+ return nil, errors.New("missing Annotations needed for decryption")
+ }
for annotationsID, scheme := range keyWrapperAnnotations {
b64Annotations := desc.Annotations[annotationsID]
keywrapper := GetKeyWrapper(scheme)
@@ -211,6 +214,9 @@ func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocisp
func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) {
privKeyGiven := false
errs := ""
+ if len(keyWrapperAnnotations) == 0 {
+ return nil, errors.New("missing Annotations needed for decryption")
+ }
for annotationsID, scheme := range keyWrapperAnnotations {
b64Annotation := desc.Annotations[annotationsID]
if b64Annotation != "" {
@@ -237,9 +243,9 @@ func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor)
}
}
if !privKeyGiven {
- return nil, errors.New("missing private key needed for decryption")
+ return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs)
}
- return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs)
+ return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs)
}
func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) {
@@ -270,7 +276,7 @@ func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64An
}
return optsData, nil
}
- return nil, errors.Errorf("no suitable key found for decrypting layer key:\n%s", errs)
+ return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs)
}
// commonEncryptLayer is a function to encrypt the plain layer using a new random
@@ -305,7 +311,7 @@ func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsDa
privOpts := blockcipher.PrivateLayerBlockCipherOptions{}
err := json.Unmarshal(privOptsData, &privOpts)
if err != nil {
- return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData")
+ return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err)
}
lbch, err := blockcipher.NewLayerBlockCipherHandler()
@@ -317,7 +323,7 @@ func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsDa
if len(pubOptsData) > 0 {
err := json.Unmarshal(pubOptsData, &pubOpts)
if err != nil {
- return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData")
+ return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err)
}
}
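
Both EncryptLayer and decryptLayerKeyOptsData now fail fast when the key-wrapper registry is empty, instead of falling through to a misleading "no suitable key" error after an empty loop. A minimal sketch of the guard, with a hypothetical empty registry:

    package main

    import (
        "errors"
        "fmt"
    )

    // hypothetical stand-in for the package-level registry in encryption.go
    var keyWrapperAnnotations = map[string]string{}

    func decryptLayerKeyOptsData() error {
        if len(keyWrapperAnnotations) == 0 {
            return errors.New("missing Annotations needed for decryption")
        }
        for annotationsID := range keyWrapperAnnotations {
            fmt.Println("trying scheme registered under", annotationsID)
        }
        return errors.New("no suitable key unwrapper found")
    }

    func main() {
        fmt.Println(decryptLayerKeyOptsData())
    }
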
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
index 02be18591..849e0eb15 100644
--- a/vendor/github.com/containers/ocicrypt/go.mod
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -1,21 +1,29 @@
module github.com/containers/ocicrypt
-go 1.12
+go 1.21
require (
- github.com/golang/protobuf v1.4.3
- github.com/google/go-cmp v0.5.2 // indirect
- github.com/miekg/pkcs11 v1.0.3
+ github.com/go-jose/go-jose/v4 v4.0.2
+ github.com/golang/protobuf v1.5.4
+ github.com/miekg/pkcs11 v1.1.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.1
- github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.7.0
+ github.com/opencontainers/image-spec v1.1.0
+ github.com/sirupsen/logrus v1.9.3
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
- github.com/stretchr/testify v1.3.0
+ github.com/stretchr/testify v1.9.0
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
- golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
- golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
- google.golang.org/grpc v1.33.2
- gopkg.in/square/go-jose.v2 v2.5.1
- gopkg.in/yaml.v2 v2.4.0
+ golang.org/x/crypto v0.24.0
+ golang.org/x/term v0.21.0
+ google.golang.org/grpc v1.64.0
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ golang.org/x/net v0.23.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
)
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
index 7153900da..86e7c07ea 100644
--- a/vendor/github.com/containers/ocicrypt/go.sum
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -1,117 +1,49 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
+github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go
index b9d55539a..3bba4669b 100644
--- a/vendor/github.com/containers/ocicrypt/gpg.go
+++ b/vendor/github.com/containers/ocicrypt/gpg.go
@@ -17,16 +17,17 @@
package ocicrypt
import (
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
+ "sync"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"golang.org/x/term"
)
@@ -78,9 +79,8 @@ func GuessGPGVersion() GPGVersion {
return GPGv2
} else if err := exec.Command("gpg", "--version").Run(); err == nil {
return GPGv1
- } else {
- return GPGVersionUndetermined
}
+ return GPGVersionUndetermined
}
// NewGPGClient creates a new GPGClient object representing the given version
@@ -132,7 +132,7 @@ func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte
rfile, wfile, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "could not create pipe")
+ return nil, fmt.Errorf("could not create pipe: %w", err)
}
defer func() {
rfile.Close()
@@ -272,8 +272,8 @@ func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
return nil, err
}
- stdoutstr, err2 := ioutil.ReadAll(stdout)
- stderrstr, _ := ioutil.ReadAll(stderr)
+ stdoutstr, err2 := io.ReadAll(stdout)
+ stderrstr, _ := io.ReadAll(stderr)
if err := cmd.Wait(); err != nil {
return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
@@ -310,9 +310,15 @@ func resolveRecipients(gc GPGClient, recipients []string) []string {
return result
}
-var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+var (
+ onceRegexp sync.Once
+ emailPattern *regexp.Regexp
+)
func extractEmailFromDetails(details []byte) string {
+ onceRegexp.Do(func() {
+ emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+ })
loc := emailPattern.FindSubmatchIndex(details)
if len(loc) == 0 {
return ""
@@ -352,7 +358,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault
}
keywrapper := GetKeyWrapper(scheme)
if keywrapper == nil {
- return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme)
+ return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme)
}
keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets)
if err != nil {
@@ -411,7 +417,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault
if !found && len(b64pgpPackets) > 0 && mustFindKey {
ids := uint64ToStringArray("0x%x", keyIds)
- return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", "))
+ return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", "))
}
}
}
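
The emailPattern change swaps an init-time regexp.MustCompile for lazy compilation under sync.Once, so the (potentially panicking) compile cost is paid on first use and remains safe for concurrent callers. The pattern in isolation, with a toy regex:

    package main

    import (
        "fmt"
        "regexp"
        "sync"
    )

    var (
        onceRegexp   sync.Once
        emailPattern *regexp.Regexp
    )

    func looksLikeUID(details string) bool {
        onceRegexp.Do(func() {
            // toy pattern; the real one captures the e-mail from gpg output
            emailPattern = regexp.MustCompile(`^uid\s`)
        })
        return emailPattern.MatchString(details)
    }

    func main() {
        fmt.Println(looksLikeUID("uid [ultimate] Alice <alice@example.com>"))
    }
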
diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go
index dd9a10007..f1bd0d989 100644
--- a/vendor/github.com/containers/ocicrypt/gpgvault.go
+++ b/vendor/github.com/containers/ocicrypt/gpgvault.go
@@ -18,9 +18,9 @@ package ocicrypt
import (
"bytes"
- "io/ioutil"
+ "fmt"
+ "os"
- "github.com/pkg/errors"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/packet"
)
@@ -55,7 +55,7 @@ func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error {
r := bytes.NewReader(gpgSecretKeyRingData)
entityList, err := openpgp.ReadKeyRing(r)
if err != nil {
- return errors.Wrapf(err, "could not read keyring")
+ return fmt.Errorf("could not read keyring: %w", err)
}
g.entityLists = append(g.entityLists, entityList)
g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData)
@@ -76,7 +76,7 @@ func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte)
// AddSecretKeyRingFiles adds the secret key rings given their filenames
func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
for _, filename := range filenames {
- gpgSecretKeyRingData, err := ioutil.ReadFile(filename)
+ gpgSecretKeyRingData, err := os.ReadFile(filename)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
index 41d0f1b3a..c1bdd6fbe 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
@@ -18,12 +18,13 @@ package jwe
import (
"crypto/ecdsa"
+ "errors"
+ "fmt"
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
- "github.com/pkg/errors"
- jose "gopkg.in/square/go-jose.v2"
+ "github.com/go-jose/go-jose/v4"
)
type jweKeyWrapper struct {
@@ -54,17 +55,21 @@ func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]
encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil)
if err != nil {
- return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed")
+ return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err)
}
jwe, err := encrypter.Encrypt(optsData)
if err != nil {
- return nil, errors.Wrapf(err, "JWE Encrypt failed")
+ return nil, fmt.Errorf("JWE Encrypt failed: %w", err)
}
return []byte(jwe.FullSerialize()), nil
}
func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
- jwe, err := jose.ParseEncrypted(string(jweString))
+ // cf. list of algorithms in func addPubKeys() below
+ keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW}
+ // accept all algorithms defined in RFC 7518, section 5.1
+ contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM}
+ jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption)
if err != nil {
return nil, errors.New("jose.ParseEncrypted failed")
}
@@ -122,9 +127,24 @@ func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error {
}
alg := jose.RSA_OAEP
- switch key.(type) {
+ switch key := key.(type) {
case *ecdsa.PublicKey:
alg = jose.ECDH_ES_A256KW
+ case *jose.JSONWebKey:
+ if key.Algorithm != "" {
+ alg = jose.KeyAlgorithm(key.Algorithm)
+ switch alg {
+ /* accepted algorithms */
+ case jose.RSA_OAEP:
+ case jose.RSA_OAEP_256:
+ case jose.ECDH_ES_A128KW:
+ case jose.ECDH_ES_A192KW:
+ case jose.ECDH_ES_A256KW:
+ /* all others are rejected */
+ default:
+ return fmt.Errorf("%s is an unsupported JWE key algorithm", alg)
+ }
+ }
}
*joseRecipients = append(*joseRecipients, jose.Recipient{
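
go-jose v4's ParseEncrypted requires callers to enumerate the acceptable key and content-encryption algorithms up front, which is why the patch lists exactly what addPubKeys can produce. A compact sketch of the call shape; the token here is deliberately invalid:

    package main

    import (
        "fmt"

        "github.com/go-jose/go-jose/v4"
    )

    func main() {
        // the same allowlists the patch builds from addPubKeys' accepted algorithms
        keyAlgs := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A256KW}
        encAlgs := []jose.ContentEncryption{jose.A128GCM, jose.A192GCM, jose.A256GCM}
        if _, err := jose.ParseEncrypted("not-a-jwe", keyAlgs, encAlgs); err != nil {
            fmt.Println("rejected:", err) // tokens using unlisted algorithms also fail here
        }
    }
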
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
index 3b4c47ed4..6ac0fcb95 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
@@ -19,12 +19,14 @@ package keyprovider
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
+
"github.com/containers/ocicrypt/config"
keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider"
- "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
@@ -112,19 +114,18 @@ func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []b
if kw.attrs.Command != nil {
protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command)
if err != nil {
- return nil, errors.Wrap(err, "error while retrieving keyprovider protocol command output")
+ return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err)
}
return protocolOuput.KeyWrapResults.Annotation, nil
} else if kw.attrs.Grpc != "" {
protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap)
if err != nil {
- return nil, errors.Wrap(err, "error while retrieving keyprovider protocol grpc output")
+ return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err)
}
return protocolOuput.KeyWrapResults.Annotation, nil
- } else {
- return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
+ return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
return nil, nil
@@ -160,9 +161,8 @@ func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString
}
return protocolOuput.KeyUnwrapResults.OptsData, nil
- } else {
- return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
+ return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) {
@@ -170,7 +170,7 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide
var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput
cc, err := grpc.Dial(connString, grpc.WithInsecure())
if err != nil {
- return nil, errors.Wrap(err, "error while dialing rpc server")
+ return nil, fmt.Errorf("error while dialing rpc server: %w", err)
}
defer func() {
derr := cc.Close()
@@ -187,12 +187,12 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide
if operation == OpKeyWrap {
grpcOutput, err = client.WrapKey(context.Background(), req)
if err != nil {
- return nil, errors.Wrap(err, "Error from grpc method")
+ return nil, fmt.Errorf("Error from grpc method: %w", err)
}
} else if operation == OpKeyUnwrap {
grpcOutput, err = client.UnWrapKey(context.Background(), req)
if err != nil {
- return nil, errors.Wrap(err, "Error from grpc method")
+ return nil, fmt.Errorf("Error from grpc method: %w", err)
}
} else {
return nil, errors.New("Unsupported operation")
@@ -201,7 +201,7 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide
respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput()
err = json.Unmarshal(respBytes, &protocolOuput)
if err != nil {
- return nil, errors.Wrap(err, "Error while unmarshalling grpc method output")
+ return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err)
}
return &protocolOuput, nil
@@ -216,7 +216,7 @@ func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command)
}
err = json.Unmarshal(respBytes, &protocolOuput)
if err != nil {
- return nil, errors.Wrap(err, "Error while unmarshalling binary executable command output")
+ return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err)
}
return &protocolOuput, nil
}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
index 275a3d8b9..4ab9bd978 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
@@ -21,16 +21,15 @@ import (
"crypto"
"crypto/rand"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"net/mail"
"strconv"
"strings"
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
- "github.com/pkg/errors"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/packet"
)
@@ -64,7 +63,7 @@ func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]
ciphertext := new(bytes.Buffer)
el, err := kw.createEntityList(ec)
if err != nil {
- return nil, errors.Wrap(err, "unable to create entity list")
+ return nil, fmt.Errorf("unable to create entity list: %w", err)
}
if len(el) == 0 {
// nothing to do -- not an error
@@ -100,7 +99,7 @@ func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) (
r := bytes.NewBuffer(pgpPrivateKey)
entityList, err := openpgp.ReadKeyRing(r)
if err != nil {
- return nil, errors.Wrap(err, "unable to parse private keys")
+ return nil, fmt.Errorf("unable to parse private keys: %w", err)
}
var prompt openpgp.PromptFunction
@@ -126,7 +125,7 @@ func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) (
continue
}
// we get the plain key options back
- optsData, err := ioutil.ReadAll(md.UnverifiedBody)
+ optsData, err := io.ReadAll(md.UnverifiedBody)
if err != nil {
continue
}
@@ -142,7 +141,7 @@ func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, er
for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") {
pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket)
if err != nil {
- return nil, errors.Wrapf(err, "could not decode base64 encoded PGP packet")
+ return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err)
}
newids, err := kw.getKeyIDs(pgpPacket)
if err != nil {
@@ -166,7 +165,7 @@ ParsePackets:
break ParsePackets
}
if err != nil {
- return []uint64{}, errors.Wrapf(err, "packets.Next() failed")
+ return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err)
}
switch p := p.(type) {
case *packet.EncryptedKey:
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
index 803b90865..b9a83c536 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
@@ -17,12 +17,13 @@
package pkcs11
import (
+ "errors"
+ "fmt"
+
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
-
- "github.com/pkg/errors"
)
type pkcs11KeyWrapper struct {
@@ -40,7 +41,11 @@ func NewKeyWrapper() keywrap.KeyWrapper {
// WrapKeys wraps the session key for recpients and encrypts the optsData, which
// describe the symmetric key used for encrypting the layer
func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
- pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...))
+ // append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image)
+ // can't race writing to the same backing array.
+ pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"])
+ pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...)
+ pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys)
if err != nil {
return nil, err
}
@@ -51,7 +56,7 @@ func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte)
jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData)
if err != nil {
- return nil, errors.Wrapf(err, "PKCS11 EncryptMulitple failed")
+ return nil, fmt.Errorf("PKCS11 EncryptMulitple failed: %w", err)
}
return jsonString, nil
}
@@ -91,7 +96,7 @@ func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byt
return plaintext, nil
}
- return nil, errors.Wrapf(err, "PKCS11: No suitable private key found for decryption")
+ return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err)
}
func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
@@ -139,7 +144,7 @@ func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, erro
return pkcs11Keys, nil
}
-func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error){
+func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) {
if _, ok := dcparameters["pkcs11-config"]; ok {
return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0])
}
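
The append([][]byte{}, ...) copy above fixes a classic aliasing hazard: appending to a slice that has spare capacity writes into the shared backing array, so two concurrent WrapKeys calls could clobber each other. A runnable demonstration of why the clone is needed:

    package main

    import "fmt"

    func main() {
        base := make([][]byte, 1, 4) // spare capacity, as a map value may well have
        base[0] = []byte("pubkey")

        a := append(base, []byte("yaml-A")) // reuses base's backing array
        b := append(base, []byte("yaml-B")) // overwrites the same slot
        fmt.Printf("%s %s\n", a[1], b[1])   // "yaml-B yaml-B" — a's element was clobbered

        c := append([][]byte{}, base...) // fresh backing array, as the patch now does
        c = append(c, []byte("yaml-C"))
        fmt.Printf("%s\n", c[1]) // immune to later appends to base
    }
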
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
index 1feae462b..603925dfe 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
@@ -19,11 +19,12 @@ package pkcs7
import (
"crypto"
"crypto/x509"
+ "errors"
+ "fmt"
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
- "github.com/pkg/errors"
"go.mozilla.org/pkcs7"
)
@@ -104,7 +105,7 @@ func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byt
p7, err := pkcs7.Parse(pkcs7Packet)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse PKCS7 packet")
+ return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err)
}
for idx, privKey := range privKeys {
diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go
index 330069d49..c0c171824 100644
--- a/vendor/github.com/containers/ocicrypt/spec/spec.go
+++ b/vendor/github.com/containers/ocicrypt/spec/spec.go
@@ -3,10 +3,18 @@ package spec
const (
// MediaTypeLayerEnc is MIME type used for encrypted layers.
MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
- // MediaTypeLayerGzipEnc is MIME type used for encrypted compressed layers.
+ // MediaTypeLayerGzipEnc is MIME type used for encrypted gzip-compressed layers.
MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
+ // MediaTypeLayerZstdEnc is MIME type used for encrypted zstd-compressed layers.
+ MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
// MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers.
MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
- // MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted compressed layers.
+ // MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+ // MediaTypeLayerNonDistributableZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers.
+ MediaTypeLayerNonDistributableZstdEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
+ // MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers.
+ //
+ // Deprecated: Use [MediaTypeLayerNonDistributableZstdEnc].
+ MediaTypeLayerNonDistributableZsdtEnc = MediaTypeLayerNonDistributableZstdEnc
)
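
The new constants follow the existing naming scheme: an encrypted media type is the corresponding OCI layer media type with "+encrypted" appended. A tiny illustration (the helper below is hypothetical, not part of ocicrypt):

package main

import "fmt"

const mediaTypeLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd"

// encryptedMediaType is a hypothetical helper showing the naming convention.
func encryptedMediaType(base string) string { return base + "+encrypted" }

func main() {
	fmt.Println(encryptedMediaType(mediaTypeLayerZstd))
	// application/vnd.oci.image.layer.v1.tar+zstd+encrypted == MediaTypeLayerZstdEnc
}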
diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
index 078c34799..c6265168a 100644
--- a/vendor/github.com/containers/ocicrypt/utils/ioutils.go
+++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
@@ -18,9 +18,9 @@ package utils
import (
"bytes"
+ "fmt"
"io"
"os/exec"
- "github.com/pkg/errors"
)
// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns
@@ -44,13 +44,15 @@ type Runner struct{}
// Exec executes a command line with the given arguments and returns its standard output, along with an error if the command fails.
func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
var out bytes.Buffer
+ var stderr bytes.Buffer
stdInputBuffer := bytes.NewBuffer(input)
cmd := exec.Command(cmdName, args...)
cmd.Stdin = stdInputBuffer
cmd.Stdout = &out
+ cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
- return nil, errors.Wrapf(err, "Error while running command: %s", cmdName)
+ return nil, fmt.Errorf("Error while running command: %s. stderr: %s: %w", cmdName, stderr.String(), err)
}
return out.Bytes(), nil
}
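
Capturing stderr into a buffer and folding it into the returned error, as the hunk above does, is a reusable pattern. A minimal sketch using only the standard library:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

func run(name string, args ...string) ([]byte, error) {
	var out, stderr bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &out
	cmd.Stderr = &stderr // without this, the command's diagnostics are lost
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("running %s: %s: %w", name, stderr.String(), err)
	}
	return out.Bytes(), nil
}

func main() {
	if _, err := run("ls", "/definitely-missing"); err != nil {
		fmt.Println(err) // includes ls's own stderr message
	}
}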
diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go
index 38633b19b..050aa885e 100644
--- a/vendor/github.com/containers/ocicrypt/utils/testing.go
+++ b/vendor/github.com/containers/ocicrypt/utils/testing.go
@@ -24,17 +24,25 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
+ "fmt"
"math/big"
"time"
-
- "github.com/pkg/errors"
)
// CreateRSAKey creates an RSA key
func CreateRSAKey(bits int) (*rsa.PrivateKey, error) {
key, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
- return nil, errors.Wrap(err, "rsa.GenerateKey failed")
+ return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err)
+ }
+ return key, nil
+}
+
+// CreateECDSAKey creates an elliptic curve key for the given curve
+func CreateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) {
+ key, err := ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err)
}
return key, nil
}
@@ -49,7 +57,7 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte
pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
if err != nil {
- return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed")
+ return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err)
}
privData := x509.MarshalPKCS1PrivateKey(key)
@@ -69,7 +77,7 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte
if len(password) > 0 {
block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
if err != nil {
- return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed")
+ return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err)
}
} else {
block = &pem.Block{
@@ -86,19 +94,19 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte
// CreateECDSATestKey creates an elliptic curve key for the given curve and returns
// the public and private key in DER format
func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
- key, err := ecdsa.GenerateKey(curve, rand.Reader)
+ key, err := CreateECDSAKey(curve)
if err != nil {
- return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed")
+ return nil, nil, err
}
pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
if err != nil {
- return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed")
+ return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err)
}
privData, err := x509.MarshalECPrivateKey(key)
if err != nil {
- return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed")
+ return nil, nil, fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err)
}
return pubData, privData, nil
@@ -108,7 +116,7 @@ func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
key, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
- return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed")
+ return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err)
}
ca := &x509.Certificate{
@@ -154,12 +162,12 @@ func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateK
certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
if err != nil {
- return nil, errors.Wrap(err, "x509.CreateCertificate failed")
+ return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err)
}
cert, err := x509.ParseCertificate(certDER)
if err != nil {
- return nil, errors.Wrap(err, "x509.ParseCertificate failed")
+ return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err)
}
return cert, nil
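
A short usage sketch for the CreateECDSAKey helper added above, assuming the vendored import path shown in this file's header:

package main

import (
	"crypto/elliptic"
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Generate a P-256 key via the new helper; CreateECDSATestKey now
	// builds on this same function before DER-encoding the result.
	key, err := utils.CreateECDSAKey(elliptic.P256())
	if err != nil {
		panic(err)
	}
	fmt.Println(key.PublicKey.Curve.Params().Name) // P-256
}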
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
index 07fe6d367..f653f2efc 100644
--- a/vendor/github.com/containers/ocicrypt/utils/utils.go
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -21,22 +21,21 @@ import (
"crypto/x509"
"encoding/base64"
"encoding/pem"
+ "errors"
"fmt"
"strings"
"github.com/containers/ocicrypt/crypto/pkcs11"
-
- "github.com/pkg/errors"
+ "github.com/go-jose/go-jose/v4"
"golang.org/x/crypto/openpgp"
- json "gopkg.in/square/go-jose.v2"
)
// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
- jwk := json.JSONWebKey{}
+ jwk := jose.JSONWebKey{}
err := jwk.UnmarshalJSON(privKey)
if err != nil {
- return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+ return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
}
if jwk.IsPublic() {
return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
@@ -46,10 +45,10 @@ func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
// parseJWKPublicKey parses the input byte array as a JWK
func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
- jwk := json.JSONWebKey{}
+ jwk := jose.JSONWebKey{}
err := jwk.UnmarshalJSON(privKey)
if err != nil {
- return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+ return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
}
if !jwk.IsPublic() {
return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
@@ -97,11 +96,11 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{
var der []byte
if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
if privKeyPassword == nil {
- return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix)
+ return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix)
}
der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
if err != nil {
- return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix)
+ return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix)
}
} else {
der = block.Bytes
@@ -111,7 +110,7 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{
if err != nil {
key, err = x509.ParsePKCS1PrivateKey(der)
if err != nil {
- return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix)
+ return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err)
}
}
} else {
@@ -145,7 +144,7 @@ func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
if block != nil {
key, err = x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
- return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix)
+ return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err)
}
} else {
key, err = parseJWKPublicKey(pubKey, prefix)
@@ -179,7 +178,7 @@ func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error
}
x509Cert, err = x509.ParseCertificate(block.Bytes)
if err != nil {
- return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix)
+ return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err)
}
}
return x509Cert, err
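
The go-jose migration above swaps gopkg.in/square/go-jose.v2 for github.com/go-jose/go-jose/v4 while keeping the same JSONWebKey surface used here (UnmarshalJSON, IsPublic). A hedged sketch of the parse path; the symmetric test JWK below is illustrative:

package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	raw := []byte(`{"kty":"oct","k":"c2VjcmV0"}`) // illustrative symmetric JWK
	var jwk jose.JSONWebKey
	if err := jwk.UnmarshalJSON(raw); err != nil {
		fmt.Println("could not parse input as JWK:", err)
		return
	}
	fmt.Println("public:", jwk.IsPublic()) // false: symmetric keys are not public keys
}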
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 96714c2fc..0fed08c3b 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -17,19 +17,15 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
- FEDORA_NAME: "fedora-34"
- PRIOR_FEDORA_NAME: "fedora-33"
- UBUNTU_NAME: "ubuntu-2104"
- PRIOR_UBUNTU_NAME: "ubuntu-2010"
+ FEDORA_NAME: "fedora-39"
+ DEBIAN_NAME: "debian-13"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- _BUILT_IMAGE_SUFFIX: "c6032583541653504"
- FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${_BUILT_IMAGE_SUFFIX}"
+ IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
+ DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
@@ -57,93 +53,92 @@ gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
-fedora_testing_task: &fedora_testing
- alias: fedora_testing
- name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
+linux_testing: &linux_testing
depends_on:
- lint
-
gce_instance: # Only need to specify differences from defaults (above)
image_name: "${VM_IMAGE}"
+ # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
+
+ always:
+ df_script: '${_DFCMD} || true'
+ rh_audit_log_script: '${_RAUDITCMD} || true'
+ debian_audit_log_script: '${_UAUDITCMD} || true'
+ journal_log_script: '${_JOURNALCMD} || true'
+
+
+fedora_testing_task: &fedora_testing
+ <<: *linux_testing
+ alias: fedora_testing
+ name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
env:
OS_NAME: "${FEDORA_NAME}"
VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"
-
# Not all $TEST_DRIVER combinations are valid for all $VM_IMAGE types.
matrix: &test_matrix
- env:
TEST_DRIVER: "vfs"
- env:
TEST_DRIVER: "overlay"
+ - env:
+ TEST_DRIVER: "overlay-transient"
- env:
TEST_DRIVER: "fuse-overlay"
- env:
TEST_DRIVER: "fuse-overlay-whiteout"
-
- # Separate scripts for separate outputs, makes debugging easier.
- setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
- build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
-
- always:
- df_script: '${_DFCMD} || true'
- rh_audit_log_script: '${_RAUDITCMD} || true'
- ubuntu_audit_log_script: '${_UAUDITCMD} || true'
- journal_log_script: '${_JOURNALCMD} || true'
-
-
-prior_fedora_testing_task:
- <<: *fedora_testing
- alias: prior_fedora_testing
- name: *std_test_name
- env:
- OS_NAME: "${PRIOR_FEDORA_NAME}"
- VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ - env:
+ TEST_DRIVER: "btrfs"
-ubuntu_testing_task: &ubuntu_testing
- <<: *fedora_testing
- alias: ubuntu_testing
+# aufs was dropped between 20.04 and 22.04, can't test it
+debian_testing_task: &debian_testing
+ <<: *linux_testing
+ alias: debian_testing
name: *std_test_name
env:
- OS_NAME: "${UBUNTU_NAME}"
- VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}"
+ OS_NAME: "${DEBIAN_NAME}"
+ VM_IMAGE: "${DEBIAN_CACHE_IMAGE_NAME}"
+ # Not all $TEST_DRIVER combinations are valid for all $VM_IMAGE types.
matrix:
- env:
TEST_DRIVER: "vfs"
- env:
TEST_DRIVER: "overlay"
-
-
-prior_ubuntu_testing_task:
- <<: *ubuntu_testing
- alias: prior_ubuntu_testing
- name: *std_test_name
- env:
- OS_NAME: "${PRIOR_UBUNTU_NAME}"
- VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
+ - env:
+ TEST_DRIVER: "fuse-overlay"
+ - env:
+ TEST_DRIVER: "fuse-overlay-whiteout"
+ - env:
+ TEST_DRIVER: "btrfs"
lint_task:
+ alias: lint
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
- image: golang:1.15
+ image: golang
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: |
- echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list
apt-get update
- apt-get install -y libbtrfs-dev libdevmapper-dev
- test_script: make lint
+ apt-get install -y libbtrfs-dev
+ test_script: |
+ make TAGS=regex_precompile local-validate
+ make lint
+ make clean
# Update metadata on VM images referenced by this repository state
meta_task:
+ alias: meta
container:
- image: "quay.io/libpod/imgts:master"
+ image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
@@ -151,9 +146,7 @@ meta_task:
# Space-separated list of images used by this repository state
IMGNAMES: |-
${FEDORA_CACHE_IMAGE_NAME}
- ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
- ${UBUNTU_CACHE_IMAGE_NAME}
- ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
+ ${DEBIAN_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -165,26 +158,54 @@ meta_task:
vendor_task:
+ alias: vendor
container:
- image: golang:1.15
+ image: golang
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: make vendor
test_script: hack/tree_status.sh
+cross_task:
+ alias: cross
+ container:
+ image: golang:1.22
+ build_script: make cross
+
+gofix_task:
+ alias: gofix
+ container:
+ image: golang:1.22
+ build_script: go fix ./...
+ test_script: git diff --exit-code
+
+codespell_task:
+ alias: codespell
+ container:
+ image: python
+ build_script: pip install codespell
+ test_script: codespell
+
-# Represent overall pass/fail status from required dependent tasks
+# Status aggregator for all tests. This task simply ensures a defined
+# set of tasks all passed, and allows confirming that based on the status
+# of this task.
success_task:
+ alias: success
+ # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT.
+ # Ref: https://github.com/openshift/release/pull/49820
+ name: "Total Success"
depends_on:
- lint
- fedora_testing
- - prior_fedora_testing
- - ubuntu_testing
- - prior_ubuntu_testing
+ - debian_testing
- meta
- vendor
+ - cross
+ - gofix
+ - codespell
container:
- image: golang:1.15
+ image: golang:1.21
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true
diff --git a/vendor/github.com/containers/storage/.codespellrc b/vendor/github.com/containers/storage/.codespellrc
new file mode 100644
index 000000000..2af969196
--- /dev/null
+++ b/vendor/github.com/containers/storage/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
+ignore-words-list = afile,flate,prevend,Plack,worl
diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml
index cd4638a39..ec11f5dae 100644
--- a/vendor/github.com/containers/storage/.golangci.yml
+++ b/vendor/github.com/containers/storage/.golangci.yml
@@ -1,37 +1,7 @@
---
run:
concurrency: 6
- deadline: 5m
+ timeout: 5m
linters:
- enable-all: true
- disable:
- - dogsled
- - dupl
- - errcheck
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - gocritic
- - gocyclo
- - godox
- - gomnd
- - gosec
- - gosimple
- - govet
- - ineffassign
- - lll
- - maligned
- - misspell
- - nakedret
- - prealloc
- - scopelint
- - staticcheck
- - structcheck
- - stylecheck
- - unconvert
- - unparam
- - unused
- - varcheck
- - whitespace
- - wsl
+ enable:
+ - gofumpt
diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
index be0791620..f4f7df4b8 100644
--- a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
+++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
@@ -1,3 +1,3 @@
## The Containers Storage Project Community Code of Conduct
-The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
+The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 581961fed..64c01b67f 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -1,114 +1,93 @@
-export GO111MODULE=off
-export GOPROXY=https://proxy.golang.org
-
.PHONY: \
all \
binary \
clean \
+ codespell \
+ containers-storage \
cross \
default \
docs \
gccgo \
help \
+ install \
+ install.docs \
install.tools \
+ lint \
local-binary \
local-cross \
local-gccgo \
+ local-test \
local-test-integration \
local-test-unit \
local-validate \
- lint \
- test \
test-integration \
test-unit \
validate \
- vendor
+ vendor \
+ vendor-in-container
-PACKAGE := github.com/containers/storage
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
-EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
NATIVETAGS :=
-AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh)
+AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libsubid_tag.sh)
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
-TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
+TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
-# Go module support: set `-mod=vendor` to use the vendored sources
-ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
- GO:=GO111MODULE=on $(GO)
- MOD_VENDOR=-mod=vendor
-endif
+# N/B: This value is managed by Renovate, manual changes are
+# possible, as long as they don't disturb the formatting
+# (i.e. DO NOT ADD A 'v' prefix!)
+GOLANGCI_LINT_VERSION := 1.61.0
-RUNINVM := vagrant/runinvm.sh
-
-default all: local-binary docs local-validate local-cross local-gccgo test-unit test-integration ## validate all checks, build and cross-build\nbinaries and docs, run tests in a VM
+default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
clean: ## remove all built files
$(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
-sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go)
-containers-storage: $(sources) ## build using gc on the host
- $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+containers-storage: ## build using gc on the host
+ $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+
+codespell:
+ codespell
binary local-binary: containers-storage
-local-gccgo: ## build using gccgo on the host
- GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
+local-gccgo gccgo: ## build using gccgo on the host
+ GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
-local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
- @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \
+local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
+ @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/riscv64 linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
- echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
- env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
+ echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) ./... ; \
+ env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) ./... || exit 1 ; \
+ echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
+ env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
done
-cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs
- $(RUNINVM) make local-$@
-
docs: install.tools ## build the docs on the host
$(MAKE) -C docs docs
-gccgo: ## build using gccgo using VMs
- $(RUNINVM) make local-$@
+local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests
-test: local-binary ## build the binaries and run the tests using VMs
- $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration
+local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
+ @$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./...
-local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
- @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
-
-test-unit: local-binary ## run the unit tests using VMs
- $(RUNINVM) make local-$@
-
-local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
+local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
@cd tests; ./test_runner.bash
-test-integration: local-binary ## run the integration tests using VMs
- $(RUNINVM) make local-$@
-
-local-validate: ## validate DCO and gofmt on the host
+local-validate validate: install.tools ## validate DCO on the host
@./hack/git-validation.sh
- @./hack/gofmt.sh
-
-validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs
- $(RUNINVM) make local-$@
install.tools:
- make -C tests/tools
-
-$(FFJSON):
- make -C tests/tools
+ $(MAKE) -C tests/tools GOLANGCI_LINT_VERSION=$(GOLANGCI_LINT_VERSION)
install.docs: docs
- make -C docs install
+ $(MAKE) -C docs install
install: install.docs
lint: install.tools
- tests/tools/build/golangci-lint run
+ tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)"
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
diff --git a/vendor/github.com/containers/storage/OWNERS b/vendor/github.com/containers/storage/OWNERS
new file mode 100644
index 000000000..a90f7e312
--- /dev/null
+++ b/vendor/github.com/containers/storage/OWNERS
@@ -0,0 +1,14 @@
+approvers:
+ - giuseppe
+ - kolyshkin
+ - mtrmac
+ - nalind
+ - rhatdan
+ - vrothberg
+reviewers:
+ - Honny1
+ - TomSweeneyRedHat
+ - flouthoc
+ - kolyshkin
+ - mrunalp
+ - vrothberg
diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md
index 1496a4c00..ab2c14182 100644
--- a/vendor/github.com/containers/storage/SECURITY.md
+++ b/vendor/github.com/containers/storage/SECURITY.md
@@ -1,3 +1,3 @@
## Security and Disclosure Information Policy for the Containers Storage Project
-The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
+The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 96cd6ee1e..3ebf789f5 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.32.1
+1.56.0
diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile
deleted file mode 100644
index c82c1f81b..000000000
--- a/vendor/github.com/containers/storage/Vagrantfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-# The fedora/28-cloud-base and debian/jessie64 boxes are also available for
-# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to
-# "virtualbox" to use them instead.
-#
-Vagrant.configure("2") do |config|
- config.vm.define "fedora" do |c|
- c.vm.box = "fedora/28-cloud-base"
- c.vm.synced_folder ".", "/vagrant", type: "rsync",
- rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
- c.vm.provision "shell", inline: <<-SHELL
- sudo /vagrant/vagrant/provision.sh
- SHELL
- end
- config.vm.define "debian" do |c|
- c.vm.box = "debian/jessie64"
- c.vm.synced_folder ".", "/vagrant", type: "rsync",
- rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
- c.vm.provision "shell", inline: <<-SHELL
- sudo /vagrant/vagrant/provision.sh
- SHELL
- end
-end
diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go
new file mode 100644
index 000000000..396648e7f
--- /dev/null
+++ b/vendor/github.com/containers/storage/check.go
@@ -0,0 +1,1158 @@
+package storage
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/types"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver,
+ // but which is not known to or managed by the higher-level driver-agnostic logic.
+ ErrLayerUnaccounted = types.ErrLayerUnaccounted
+ // ErrLayerUnreferenced describes a layer which is not used by any image or container.
+ ErrLayerUnreferenced = types.ErrLayerUnreferenced
+ // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more
+ // files which were added in the layer appear to have changed. It may instead look like an
+ // unnamed "file integrity checksum failed" error.
+ ErrLayerIncorrectContentDigest = types.ErrLayerIncorrectContentDigest
+ // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was
+ // used to populate the layer produced a diff of a different size. We check the digest
+ // first, so it's highly unlikely you'll ever see this error.
+ ErrLayerIncorrectContentSize = types.ErrLayerIncorrectContentSize
+ // ErrLayerContentModified describes a layer which contains contents which should not be
+ // there, or for which ownership/permissions/dates have been changed.
+ ErrLayerContentModified = types.ErrLayerContentModified
+ // ErrLayerDataMissing describes a layer which is missing a big data item.
+ ErrLayerDataMissing = types.ErrLayerDataMissing
+ // ErrLayerMissing describes a layer which is the missing parent of a layer.
+ ErrLayerMissing = types.ErrLayerMissing
+ // ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+ // about.
+ ErrImageLayerMissing = types.ErrImageLayerMissing
+ // ErrImageDataMissing describes an image which is missing a big data item.
+ ErrImageDataMissing = types.ErrImageDataMissing
+ // ErrImageDataIncorrectSize describes an image which has a big data item which looks like
+ // its size has changed, likely because it's been modified somehow.
+ ErrImageDataIncorrectSize = types.ErrImageDataIncorrectSize
+ // ErrContainerImageMissing describes a container which claims to be based on an image that
+ // we don't know about.
+ ErrContainerImageMissing = types.ErrContainerImageMissing
+ // ErrContainerDataMissing describes a container which is missing a big data item.
+ ErrContainerDataMissing = types.ErrContainerDataMissing
+ // ErrContainerDataIncorrectSize describes a container which has a big data item which looks
+ // like its size has changed, likely because it's been modified somehow.
+ ErrContainerDataIncorrectSize = types.ErrContainerDataIncorrectSize
+)
+
+const (
+ defaultMaximumUnreferencedLayerAge = 24 * time.Hour
+)
+
+// CheckOptions is the set of options for Check(), specifying which tests to perform.
+type CheckOptions struct {
+ LayerUnreferencedMaximumAge *time.Duration // maximum allowed age of unreferenced layers
+ LayerDigests bool // check that contents of image layer diffs can still be reconstructed
+ LayerMountable bool // check that layers are mountable
+ LayerContents bool // check that contents of image layers match their diffs, with no unexpected changes, requires LayerMountable
+ LayerData bool // check that associated "big" data items are present and can be read
+ ImageData bool // check that associated "big" data items are present, can be read, and match the recorded size
+ ContainerData bool // check that associated "big" data items are present and can be read
+}
+
+// checkIgnore tells the functions that compare a mounted layer's contents
+// against the contents we expect it to have which kinds of discrepancies
+// they should ignore
+type checkIgnore struct {
+ ownership, timestamps, permissions bool
+}
+
+// CheckMost returns a CheckOptions with mostly just "quick" checks enabled.
+func CheckMost() *CheckOptions {
+ return &CheckOptions{
+ LayerDigests: true,
+ LayerMountable: true,
+ LayerContents: false,
+ LayerData: true,
+ ImageData: true,
+ ContainerData: true,
+ }
+}
+
+// CheckEverything returns a CheckOptions with every check enabled.
+func CheckEverything() *CheckOptions {
+ return &CheckOptions{
+ LayerDigests: true,
+ LayerMountable: true,
+ LayerContents: true,
+ LayerData: true,
+ ImageData: true,
+ ContainerData: true,
+ }
+}
+
+// CheckReport is a list of detected problems.
+type CheckReport struct {
+ Layers map[string][]error // damaged read-write layers
+ ROLayers map[string][]error // damaged read-only layers
+ layerParentsByLayerID map[string]string
+ layerOrder map[string]int
+ Images map[string][]error // damaged read-write images (including those with damaged layers)
+ ROImages map[string][]error // damaged read-only images (including those with damaged layers)
+ Containers map[string][]error // damaged containers (including those based on damaged images)
+}
+
+// RepairOptions is the set of options for Repair().
+type RepairOptions struct {
+ RemoveContainers bool // Remove damaged containers
+}
+
+// RepairEverything returns a RepairOptions with every optional remediation
+// enabled.
+func RepairEverything() *RepairOptions {
+ return &RepairOptions{
+ RemoveContainers: true,
+ }
+}
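
Before the implementation, a sketch of how a caller might drive this API, assuming Check and Repair are exposed on the Store interface as in this version; the store paths below are illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Illustrative paths; a real caller would load the system defaults.
	store, err := storage.GetStore(types.StoreOptions{
		RunRoot:   "/run/containers/storage",
		GraphRoot: "/var/lib/containers/storage",
	})
	if err != nil {
		panic(err)
	}
	report, err := store.Check(storage.CheckEverything())
	if err != nil {
		panic(err)
	}
	for id, errs := range report.Layers {
		fmt.Printf("damaged layer %s: %v\n", id, errs)
	}
	for _, err := range store.Repair(report, storage.RepairEverything()) {
		fmt.Println("repair:", err)
	}
}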
+
+// Check returns a list of problems with what's in the store, as a whole. It can be very expensive
+// to call.
+func (s *store) Check(options *CheckOptions) (CheckReport, error) {
+ var ignore checkIgnore
+ for _, o := range s.graphOptions {
+ if strings.Contains(o, "ignore_chown_errors=true") {
+ ignore.ownership = true
+ }
+ if strings.HasPrefix(o, "force_mask=") {
+ ignore.permissions = true
+ }
+ }
+ for o := range s.pullOptions {
+ if strings.Contains(o, "use_hard_links") {
+ if s.pullOptions[o] == "true" {
+ ignore.timestamps = true
+ }
+ }
+ }
+
+ if options == nil {
+ options = CheckMost()
+ }
+
+ report := CheckReport{
+ Layers: make(map[string][]error),
+ ROLayers: make(map[string][]error),
+ layerParentsByLayerID: make(map[string]string), // layers ID -> their parent's ID, if there is one
+ layerOrder: make(map[string]int), // layers ID -> order for removal, if we needed to remove them all
+ Images: make(map[string][]error),
+ ROImages: make(map[string][]error),
+ Containers: make(map[string][]error),
+ }
+
+ // This map will track known layer IDs. If we have multiple stores, read-only ones can
+ // contain copies of layers that are in the read-write store, but we'll only ever be
+ // mounting or extracting contents from the read-write versions, since we always search it
+ // first. The boolean will track if the layer is referenced by at least one image or
+ // container.
+ referencedLayers := make(map[string]bool)
+ referencedROLayers := make(map[string]bool)
+
+ // This map caches the headers for items included in layer diffs.
+ diffHeadersByLayer := make(map[string][]*tar.Header)
+ var diffHeadersByLayerMutex sync.Mutex
+
+ // Walk the list of layer stores, looking at each layer that we didn't see in a
+ // previously-visited store.
+ if _, _, err := readOrWriteAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+ layers, err := store.Layers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ isReadWrite := roLayerStoreIsReallyReadWrite(store)
+ readWriteDesc := ""
+ if !isReadWrite {
+ readWriteDesc = "read-only "
+ }
+ // Examine each layer in turn.
+ for i := range layers {
+ layer := layers[i]
+ id := layer.ID
+ // If we've already seen a layer with this ID, no need to process it again.
+ if _, checked := referencedLayers[id]; checked {
+ continue
+ }
+ if _, checked := referencedROLayers[id]; checked {
+ continue
+ }
+ // Note the parent of this layer, and add it to the map of known layers so
+ // that we know that we've visited it, but we haven't confirmed that it's
+ // used by anything.
+ report.layerParentsByLayerID[id] = layer.Parent
+ if isReadWrite {
+ referencedLayers[id] = false
+ } else {
+ referencedROLayers[id] = false
+ }
+ logrus.Debugf("checking %slayer %s", readWriteDesc, id)
+ // Check that all of the big data items are present and can be read. We
+ // have no digest or size information to compare the contents to (grumble),
+ // so we can't verify that the contents haven't been changed since they
+ // were stored.
+ if options.LayerData {
+ for _, name := range layer.BigDataNames {
+ func() {
+ rc, err := store.BigData(id, name)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err := fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, ErrLayerDataMissing)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ defer rc.Close()
+ if _, err = io.Copy(io.Discard, rc); err != nil {
+ err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ }()
+ }
+ }
+ // Check that the content we get back when extracting the layer's contents
+ // match the recorded digest and size. A layer for which they're not given
+ // isn't a part of an image, and is likely the read-write layer for a
+ // container, and we can't vouch for the integrity of its contents.
+ // For each layer with known contents, record the headers for the layer's
+ // diff, which we can use to reconstruct the expected contents for the tree
+ // we see when the layer is mounted.
+ if options.LayerDigests && layer.UncompressedDigest != "" {
+ func() {
+ expectedDigest := layer.UncompressedDigest
+ // Double-check that the digest isn't invalid somehow.
+ if err := layer.UncompressedDigest.Validate(); err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Extract the diff.
+ uncompressed := archive.Uncompressed
+ diffOptions := DiffOptions{
+ Compression: &uncompressed,
+ }
+ diff, err := store.Diff("", id, &diffOptions)
+ if err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Digest and count the length of the diff.
+ digester := expectedDigest.Algorithm().Digester()
+ counter := ioutils.NewWriteCounter(digester.Hash())
+ reader := io.TeeReader(diff, counter)
+ var wg sync.WaitGroup
+ var archiveErr error
+ wg.Add(1)
+ go func(layerID string, diffReader io.Reader) {
+ // Read the diff, one item at a time.
+ tr := tar.NewReader(diffReader)
+ hdr, err := tr.Next()
+ for err == nil {
+ diffHeadersByLayerMutex.Lock()
+ diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr)
+ diffHeadersByLayerMutex.Unlock()
+ hdr, err = tr.Next()
+ }
+ if !errors.Is(err, io.EOF) {
+ archiveErr = err
+ }
+ // consume any trailer after the EOF marker
+ if _, err := io.Copy(io.Discard, diffReader); err != nil {
+ err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err)
+ if isReadWrite {
+ report.Layers[layerID] = append(report.Layers[layerID], err)
+ } else {
+ report.ROLayers[layerID] = append(report.ROLayers[layerID], err)
+ }
+ }
+ wg.Done()
+ }(id, reader)
+ wg.Wait()
+ diff.Close()
+ if archiveErr != nil {
+ // Reading the diff didn't end as expected
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ archiveErr = fmt.Errorf("%slayer %s: %w", readWriteDesc, id, archiveErr)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], archiveErr)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], archiveErr)
+ }
+ return
+ }
+ if digester.Digest() != layer.UncompressedDigest {
+ // The diff digest didn't match.
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, ErrLayerIncorrectContentDigest)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ }
+ if layer.UncompressedSize != -1 && counter.Count != layer.UncompressedSize {
+ // We expected the diff to have a specific size, and
+ // it didn't match.
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ err := fmt.Errorf("%slayer %s: read %d bytes instead of %d bytes: %w", readWriteDesc, id, counter.Count, layer.UncompressedSize, ErrLayerIncorrectContentSize)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ }
+ }()
+ }
+ }
+ // At this point we're out of things that we can be sure will work in read-only
+ // stores, so skip the rest for any stores that aren't also read-write stores.
+ if !isReadWrite {
+ return struct{}{}, false, nil
+ }
+ // Content and mount checks are also things that we can only be sure will work in
+ // read-write stores.
+ for i := range layers {
+ layer := layers[i]
+ id := layer.ID
+ // Compare to what we see when we mount the layer and walk the tree, and
+ // flag cases where content is in the layer that shouldn't be there. The
+ // tar-split implementation of Diff() won't catch this problem by itself.
+ if options.LayerMountable {
+ func() {
+ // Mount the layer.
+ mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel, Options: []string{"ro"}})
+ if err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Unmount the layer when we're done in here.
+ defer func() {
+ if err := s.graphDriver.Put(id); err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ }()
+ // If we're not looking at layer contents, or we didn't
+ // look at the diff for this layer, we're done here.
+ if !options.LayerDigests || layer.UncompressedDigest == "" || !options.LayerContents {
+ return
+ }
+ // Build a list of all of the changes in all of the layers
+ // that make up the tree we're looking at.
+ diffHeaderSet := [][]*tar.Header{}
+ // If we don't know _all_ of the changes that produced this
+ // layer, it's not part of an image, so we're done here.
+ for layerID := id; layerID != ""; layerID = report.layerParentsByLayerID[layerID] {
+ diffHeadersByLayerMutex.Lock()
+ layerChanges, haveChanges := diffHeadersByLayer[layerID]
+ diffHeadersByLayerMutex.Unlock()
+ if !haveChanges {
+ return
+ }
+ // The diff headers for this layer go _before_ those of
+ // layers that inherited some of its contents.
+ diffHeaderSet = append([][]*tar.Header{layerChanges}, diffHeaderSet...)
+ }
+ expectedCheckDirectory := newCheckDirectoryDefaults()
+ for _, diffHeaders := range diffHeaderSet {
+ expectedCheckDirectory.headers(diffHeaders)
+ }
+ // Scan the directory tree under the mount point.
+ var idmap *idtools.IDMappings
+ if !s.canUseShifting(layer.UIDMap, layer.GIDMap) {
+ // we would have had to chown() layer contents to match ID maps
+ idmap = idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
+ }
+ actualCheckDirectory, err := newCheckDirectoryFromDirectory(mountPoint)
+ if err != nil {
+ err := fmt.Errorf("scanning contents of %slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Every departure from our expectations is an error.
+ diffs := compareCheckDirectory(expectedCheckDirectory, actualCheckDirectory, idmap, ignore)
+ for _, diff := range diffs {
+ err := fmt.Errorf("%slayer %s: %s, %w", readWriteDesc, id, diff, ErrLayerContentModified)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ }
+ }()
+ }
+ }
+ // Check that we don't have any dangling parent layer references.
+ for id, parent := range report.layerParentsByLayerID {
+ // If this layer doesn't have a parent, no problem.
+ if parent == "" {
+ continue
+ }
+ // If we've already seen a layer with this parent ID, skip it.
+ if _, checked := referencedLayers[parent]; checked {
+ continue
+ }
+ if _, checked := referencedROLayers[parent]; checked {
+ continue
+ }
+ // We haven't seen a layer with the ID that this layer's record
+ // says is its parent's ID.
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, parent, ErrLayerMissing)
+ report.Layers[id] = append(report.Layers[id], err)
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // This map will track examined images. If we have multiple stores, read-only ones can
+ // contain copies of images that are also in the read-write store, or the read-write store
+ // may contain a duplicate entry that refers to layers in the read-only stores, but when
+ // trying to export them, we only look at the first copy of the image.
+ examinedImages := make(map[string]struct{})
+
+ // Walk the list of image stores, looking at each image that we didn't see in a
+ // previously-visited store.
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
+ images, err := store.Images()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ isReadWrite := roImageStoreIsReallyReadWrite(store)
+ readWriteDesc := ""
+ if !isReadWrite {
+ readWriteDesc = "read-only "
+ }
+ // Examine each image in turn.
+ for i := range images {
+ image := images[i]
+ id := image.ID
+ // If we've already seen an image with this ID, skip it.
+ if _, checked := examinedImages[id]; checked {
+ continue
+ }
+ examinedImages[id] = struct{}{}
+ logrus.Debugf("checking %simage %s", readWriteDesc, id)
+ if options.ImageData {
+ // Check that all of the big data items are present and reading them
+ // back gives us the right amount of data. Even though we record
+ // digests that can be used to look them up, we don't know how they
+ // were calculated (they're only used as lookup keys), so do not try
+ // to check them.
+ for _, key := range image.BigDataNames {
+ func() {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataMissing)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, err)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ if int64(len(data)) != image.BigDataSizes[key] {
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataIncorrectSize)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ }()
+ }
+ }
+ // Walk the layers list for the image. For every layer that the image uses
+ // that has errors, the layer's errors are also the image's errors.
+ examinedImageLayers := make(map[string]struct{})
+ for _, topLayer := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+ if topLayer == "" {
+ continue
+ }
+ if _, checked := examinedImageLayers[topLayer]; checked {
+ continue
+ }
+ examinedImageLayers[topLayer] = struct{}{}
+ for layer := topLayer; layer != ""; layer = report.layerParentsByLayerID[layer] {
+ // The referenced layer should have a corresponding entry in
+ // one map or the other.
+ _, checked := referencedLayers[layer]
+ _, checkedRO := referencedROLayers[layer]
+ if !checked && !checkedRO {
+ err := fmt.Errorf("layer %s: %w", layer, ErrImageLayerMissing)
+ err = fmt.Errorf("%simage %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ } else {
+ // Count this layer as referenced. Whether by the
+ // image or one of its child layers doesn't matter
+ // at this point.
+ if _, ok := referencedLayers[layer]; ok {
+ referencedLayers[layer] = true
+ }
+ if _, ok := referencedROLayers[layer]; ok {
+ referencedROLayers[layer] = true
+ }
+ }
+ if isReadWrite {
+ if len(report.Layers[layer]) > 0 {
+ report.Images[id] = append(report.Images[id], report.Layers[layer]...)
+ }
+ if len(report.ROLayers[layer]) > 0 {
+ report.Images[id] = append(report.Images[id], report.ROLayers[layer]...)
+ }
+ } else {
+ if len(report.Layers[layer]) > 0 {
+ report.ROImages[id] = append(report.ROImages[id], report.Layers[layer]...)
+ }
+ if len(report.ROLayers[layer]) > 0 {
+ report.ROImages[id] = append(report.ROImages[id], report.ROLayers[layer]...)
+ }
+ }
+ }
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // Iterate over each container in turn.
+ if _, _, err := readContainerStore(s, func() (struct{}, bool, error) {
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ for i := range containers {
+ container := containers[i]
+ id := container.ID
+ logrus.Debugf("checking container %s", id)
+ if options.ContainerData {
+ // Check that all of the big data items are present and reading them
+ // back gives us the right amount of data.
+ for _, key := range container.BigDataNames {
+ func() {
+ data, err := s.containerStore.BigData(id, key)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataMissing)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, err)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ if int64(len(data)) != container.BigDataSizes[key] {
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataIncorrectSize)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ }()
+ }
+ }
+ // Look at the container's base image. If the image has errors, the image's errors
+ // are the container's errors.
+ if container.ImageID != "" {
+ if _, checked := examinedImages[container.ImageID]; !checked {
+ err := fmt.Errorf("image %s: %w", container.ImageID, ErrContainerImageMissing)
+ report.Containers[id] = append(report.Containers[id], err)
+ }
+ if len(report.Images[container.ImageID]) > 0 {
+ report.Containers[id] = append(report.Containers[id], report.Images[container.ImageID]...)
+ }
+ if len(report.ROImages[container.ImageID]) > 0 {
+ report.Containers[id] = append(report.Containers[id], report.ROImages[container.ImageID]...)
+ }
+ }
+ // Count the container's layer as referenced.
+ if container.LayerID != "" {
+ referencedLayers[container.LayerID] = true
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // Now go back through all of the layer stores, and flag any layers which don't belong
+ // to an image or a container, and have been around longer than we can reasonably expect
+ // such a layer to be present before a corresponding image record is added.
+ if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+ if isReadWrite := roLayerStoreIsReallyReadWrite(store); !isReadWrite {
+ return struct{}{}, false, nil
+ }
+ layers, err := store.Layers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ for _, layer := range layers {
+ maximumAge := defaultMaximumUnreferencedLayerAge
+ if options.LayerUnreferencedMaximumAge != nil {
+ maximumAge = *options.LayerUnreferencedMaximumAge
+ }
+ if referenced := referencedLayers[layer.ID]; !referenced {
+ if layer.Created.IsZero() || layer.Created.Add(maximumAge).Before(time.Now()) {
+ // Either we don't (and never will) know when this layer was
+ // created, or it was created far enough in the past that we're
+ // reasonably sure it's not part of an image that's being written
+ // right now.
+ err := fmt.Errorf("layer %s: %w", layer.ID, ErrLayerUnreferenced)
+ report.Layers[layer.ID] = append(report.Layers[layer.ID], err)
+ }
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // If the driver can tell us about which layers it knows about, we should have previously
+ // examined all of them. Any that we didn't are probably just wasted space.
+ // Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported.
+ if err := s.startUsingGraphDriver(); err != nil {
+ return CheckReport{}, err
+ }
+ defer s.stopUsingGraphDriver()
+ layerList, err := s.graphDriver.ListLayers()
+ if err != nil && !errors.Is(err, drivers.ErrNotSupported) {
+ return CheckReport{}, err
+ }
+ if !errors.Is(err, drivers.ErrNotSupported) {
+ for i, id := range layerList {
+ if _, known := referencedLayers[id]; !known {
+ err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted)
+ report.Layers[id] = append(report.Layers[id], err)
+ }
+ report.layerOrder[id] = i + 1
+ }
+ }
+
+ return report, nil
+}
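
Check's digest verification above reads each diff only once, using io.TeeReader so the tar scanner and the digester consume the same bytes. A self-contained sketch of that pattern (stdlib sha256 stands in for the layer's recorded digest algorithm):

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
)

func main() {
	// Build a one-entry tar stream to scan.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	_ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Size: 5, Mode: 0o644})
	_, _ = tw.Write([]byte("hello"))
	_ = tw.Close()

	h := sha256.New()
	tr := tar.NewReader(io.TeeReader(&buf, h)) // every byte tr reads also feeds h
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("entry:", hdr.Name)
	}
	// Check additionally drains any trailer with io.Copy so the digest
	// covers the whole stream, not just what the tar reader consumed.
	fmt.Printf("digest prefix: %x\n", h.Sum(nil)[:8])
}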
+
+func roLayerStoreIsReallyReadWrite(store roLayerStore) bool {
+ return store.(*layerStore).lockfile.IsReadWrite()
+}
+
+func roImageStoreIsReallyReadWrite(store roImageStore) bool {
+ return store.(*imageStore).lockfile.IsReadWrite()
+}
+
+// Repair removes items which are themselves damaged, or which depend on items which are damaged.
+// Errors are returned if an attempt to delete an item fails.
+func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
+ if options == nil {
+ options = RepairEverything()
+ }
+ var errs []error
+ // Just delete damaged containers.
+ if options.RemoveContainers {
+ for id := range report.Containers {
+ err := s.DeleteContainer(id)
+ if err != nil && !errors.Is(err, ErrContainerUnknown) {
+ err := fmt.Errorf("deleting container %s: %w", id, err)
+ errs = append(errs, err)
+ }
+ }
+ }
+ // Now delete damaged images. Note which layers were removed as part of removing those images.
+ deletedLayers := make(map[string]struct{})
+ for id := range report.Images {
+ layers, err := s.DeleteImage(id, true)
+ if err != nil {
+ if !errors.Is(err, ErrImageUnknown) && !errors.Is(err, ErrLayerUnknown) {
+ err := fmt.Errorf("deleting image %s: %w", id, err)
+ errs = append(errs, err)
+ }
+ } else {
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %s", layer)
+ deletedLayers[layer] = struct{}{}
+ }
+ logrus.Debugf("deleted image %s", id)
+ }
+ }
+ // Build a list of the layers that we need to remove, sorted with parents of layers before
+ // layers that they are parents of.
+ layersToDelete := make([]string, 0, len(report.Layers))
+ for id := range report.Layers {
+ layersToDelete = append(layersToDelete, id)
+ }
+ depth := func(id string) int {
+ d := 0
+ parent := report.layerParentsByLayerID[id]
+ for parent != "" {
+ d++
+ parent = report.layerParentsByLayerID[parent]
+ }
+ return d
+ }
+ isUnaccounted := func(errs []error) bool {
+ return slices.ContainsFunc(errs, func(err error) bool {
+ return errors.Is(err, ErrLayerUnaccounted)
+ })
+ }
+ sort.Slice(layersToDelete, func(i, j int) bool {
+ // we've not heard of either of them, so remove them in the order the driver suggested
+ if isUnaccounted(report.Layers[layersToDelete[i]]) &&
+ isUnaccounted(report.Layers[layersToDelete[j]]) &&
+ report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 {
+ return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]]
+ }
+ // always delete the one we've heard of first
+ if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) {
+ return false
+ }
+ // always delete the one we've heard of first
+ if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) {
+ return true
+ }
+ // we've heard of both of them; the one that's on the end of a longer chain goes first
+ return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later
+ })
+ // Now delete the layers that haven't been removed along with images.
+ for _, id := range layersToDelete {
+ if _, ok := deletedLayers[id]; ok {
+ continue
+ }
+ for _, reportedErr := range report.Layers[id] {
+ var err error
+ // If a layer was unaccounted for, remove it at the storage driver level.
+ // Otherwise, remove it at the higher level and let the higher level
+ // logic worry about telling the storage driver to delete the layer.
+ if errors.Is(reportedErr, ErrLayerUnaccounted) {
+ if err = s.graphDriver.Remove(id); err != nil {
+ err = fmt.Errorf("deleting storage layer %s: %v", id, err)
+ } else {
+ logrus.Debugf("deleted storage layer %s", id)
+ }
+ } else {
+ var stillMounted bool
+ if stillMounted, err = s.Unmount(id, true); err == nil && !stillMounted {
+ logrus.Debugf("unmounted layer %s", id)
+ } else if err != nil {
+ logrus.Debugf("unmounting layer %s: %v", id, err)
+ } else {
+ logrus.Debugf("layer %s still mounted", id)
+ }
+ if err = s.DeleteLayer(id); err != nil {
+ err = fmt.Errorf("deleting layer %s: %w", id, err)
+ } else {
+ logrus.Debugf("deleted layer %s", id)
+ }
+ }
+ if err != nil && !errors.Is(err, ErrLayerUnknown) && !errors.Is(err, ErrNotALayer) && !errors.Is(err, os.ErrNotExist) {
+ errs = append(errs, err)
+ }
+ }
+ }
+ return errs
+}
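+
+// As a sketch of the ordering above: given a hypothetical chain
+// base ← mid ← top plus an unaccounted layer X reported by the driver,
+//
+//	depth(top) == 2, depth(mid) == 1, depth(base) == 0
+//	deletion order: top, mid, base, then X
+//
+// children are removed before their parents, and layers the records have
+// never heard of are removed last, in the order the driver suggested via
+// report.layerOrder.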
+
+// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
+func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
+ var comparison []string
+ if a.typeflag != b.typeflag {
+ comparison = append(comparison, fmt.Sprintf("filetype:%vï¿«%v", a.typeflag, b.typeflag))
+ }
+ if idmap != nil && !idmap.Empty() {
+ mappedUID, mappedGID, err := idmap.ToContainer(idtools.IDPair{UID: b.uid, GID: b.gid})
+ if err != nil {
+ return err.Error()
+ }
+ b.uid, b.gid = mappedUID, mappedGID
+ }
+ if a.uid != b.uid && !ignore.ownership {
+ comparison = append(comparison, fmt.Sprintf("uid:%dï¿«%d", a.uid, b.uid))
+ }
+ if a.gid != b.gid && !ignore.ownership {
+ comparison = append(comparison, fmt.Sprintf("gid:%dï¿«%d", a.gid, b.gid))
+ }
+ if a.size != b.size {
+ comparison = append(comparison, fmt.Sprintf("size:%dï¿«%d", a.size, b.size))
+ }
+ if (os.ModeType|os.ModePerm)&a.mode != (os.ModeType|os.ModePerm)&b.mode && !ignore.permissions {
+ comparison = append(comparison, fmt.Sprintf("mode:%04oï¿«%04o", a.mode, b.mode))
+ }
+ if a.mtime != b.mtime && !ignore.timestamps {
+ comparison = append(comparison, fmt.Sprintf("mtime:0x%xï¿«0x%x", a.mtime, b.mtime))
+ }
+ return strings.Join(comparison, ",")
+}
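+
+// For two hypothetical entries that differ in ownership and size (with
+// ignore.ownership unset), the summary produced above looks like
+//
+//	uid:0￫1000,gid:0￫1000,size:10￫20
+//
+// and an empty string means the entries match under the given checkIgnore
+// settings.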
+
+// checkFileInfo is what we care about for files
+type checkFileInfo struct {
+ typeflag byte
+ uid, gid int
+ size int64
+ mode os.FileMode
+ mtime int64 // unix-style whole seconds
+}
+
+// checkDirectory is a node in a filesystem record, possibly the top
+type checkDirectory struct {
+ directory map[string]*checkDirectory // subdirectories
+ file map[string]checkFileInfo // non-directories
+ checkFileInfo
+}
+
+// newCheckDirectory creates an empty checkDirectory
+func newCheckDirectory(uid, gid int, size int64, mode os.FileMode, mtime int64) *checkDirectory {
+ return &checkDirectory{
+ directory: make(map[string]*checkDirectory),
+ file: make(map[string]checkFileInfo),
+ checkFileInfo: checkFileInfo{
+ typeflag: tar.TypeDir,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ mtime: mtime,
+ },
+ }
+}
+
+// newCheckDirectoryDefaults creates an empty checkDirectory with hardwired defaults for the UID
+// (0), GID (0), size (0) and permissions (0o555)
+func newCheckDirectoryDefaults() *checkDirectory {
+ return newCheckDirectory(0, 0, 0, 0o555, time.Now().Unix())
+}
+
+// newCheckDirectoryFromDirectory creates a checkDirectory for an on-disk directory tree
+func newCheckDirectoryFromDirectory(dir string) (*checkDirectory, error) {
+ cd := newCheckDirectoryDefaults()
+ err := filepath.Walk(dir, func(walkpath string, info os.FileInfo, err error) error {
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ rel, err := filepath.Rel(dir, walkpath)
+ if err != nil {
+ return err
+ }
+ hdr, err := tar.FileInfoHeader(info, "") // we don't record link targets, so don't bother looking it up
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.ToSlash(rel)
+ cd.header(hdr)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return cd, nil
+}
+
+// add adds an item to a checkDirectory
+func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int64, mode os.FileMode, mtime int64) {
+ components := strings.Split(path, "/")
+ if components[len(components)-1] == "" {
+ components = components[:len(components)-1]
+ }
+ if components[0] == "." {
+ components = components[1:]
+ }
+ if typeflag != tar.TypeReg {
+ size = 0
+ }
+ switch len(components) {
+ case 0:
+ c.uid = uid
+ c.gid = gid
+ c.mode = mode
+ c.mtime = mtime
+ return
+ case 1:
+ switch typeflag {
+ case tar.TypeDir:
+ delete(c.file, components[0])
+ // directory entries are merged with existing entries, not replaced
+ if _, present := c.directory[components[0]]; !present {
+ c.directory[components[0]] = newCheckDirectory(uid, gid, size, mode, mtime)
+ } else {
+ c.directory[components[0]].checkFileInfo = checkFileInfo{
+ typeflag: tar.TypeDir,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ mtime: mtime,
+ }
+ }
+ case tar.TypeXGlobalHeader:
+ // ignore, since even though it looks like a valid pathname, it doesn't end
+ // up on the filesystem
+ default:
+ // treat these as TypeReg items
+ delete(c.directory, components[0])
+ c.file[components[0]] = checkFileInfo{
+ typeflag: typeflag,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ mtime: mtime,
+ }
+ }
+ return
+ }
+ subdirectory := c.directory[components[0]]
+ if subdirectory == nil {
+ subdirectory = newCheckDirectory(uid, gid, size, mode, mtime)
+ c.directory[components[0]] = subdirectory
+ }
+ subdirectory.add(strings.Join(components[1:], "/"), typeflag, uid, gid, size, mode, mtime)
+}
+
+// remove removes an item from a checkDirectory
+func (c *checkDirectory) remove(path string) {
+ components := strings.Split(path, "/")
+ if len(components) == 1 {
+ delete(c.directory, components[0])
+ delete(c.file, components[0])
+ return
+ }
+ subdirectory := c.directory[components[0]]
+ if subdirectory != nil {
+ subdirectory.remove(strings.Join(components[1:], "/"))
+ }
+}
+
+// header updates a checkDirectory using information from the passed-in header
+func (c *checkDirectory) header(hdr *tar.Header) {
+ name := path.Clean(hdr.Name)
+ dir, base := path.Split(name)
+ if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
+ if base == archive.WhiteoutOpaqueDir {
+ c.remove(path.Clean(dir))
+ c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
+ } else {
+ c.remove(path.Join(dir, file))
+ }
+ } else {
+ if hdr.Typeflag == tar.TypeLink {
+ // look up the attributes of the target of the hard link
+ // n.b. by convention, Linkname is always relative to the
+ // root directory of the archive, which is not always the
+ // same as being relative to hdr.Name
+ directory := c
+ for _, component := range strings.Split(path.Clean(hdr.Linkname), "/") {
+ if component == "." || component == ".." {
+ continue
+ }
+ if subdir, ok := directory.directory[component]; ok {
+ directory = subdir
+ continue
+ }
+ if file, ok := directory.file[component]; ok {
+ hdr.Typeflag = file.typeflag
+ hdr.Uid = file.uid
+ hdr.Gid = file.gid
+ hdr.Size = file.size
+ hdr.Mode = int64(file.mode)
+ hdr.ModTime = time.Unix(file.mtime, 0)
+ }
+ break
+ }
+ }
+ c.add(name, hdr.Typeflag, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
+ }
+}
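+
+// For example, assuming a layer containing the hypothetical entries
+//
+//	etc/.wh.hostname       (whiteout: removes etc/hostname from the tree)
+//	var/cache/.wh..wh..opq (opaque marker: resets var/cache to only what
+//	                        this layer provides)
+//
+// header() deletes the named items from the in-memory record instead of
+// adding them, mirroring how whiteouts are applied per the OCI layer spec.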
+
+// headers updates a checkDirectory using information from the passed-in header slice
+func (c *checkDirectory) headers(hdrs []*tar.Header) {
+ hdrs = slices.Clone(hdrs)
+ // sort the headers from the diff to ensure that whiteouts appear
+ // before content when they both appear in the same directory, per
+ // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
+ // and that hard links appear after other types of entries
+ sort.SliceStable(hdrs, func(i, j int) bool {
+ if hdrs[i].Typeflag != tar.TypeLink && hdrs[j].Typeflag == tar.TypeLink {
+ return true
+ }
+ if hdrs[i].Typeflag == tar.TypeLink && hdrs[j].Typeflag != tar.TypeLink {
+ return false
+ }
+ idir, ifile := path.Split(hdrs[i].Name)
+ jdir, jfile := path.Split(hdrs[j].Name)
+ if idir != jdir {
+ return hdrs[i].Name < hdrs[j].Name
+ }
+ if ifile == archive.WhiteoutOpaqueDir {
+ return true
+ }
+ if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) {
+ return true
+ }
+ return false
+ })
+ for _, hdr := range hdrs {
+ c.header(hdr)
+ }
+}
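+
+// Given hypothetical headers for a/file, a/.wh.gone, and a hard link b/link,
+// the sort above yields a/.wh.gone, a/file, b/link: whiteouts precede
+// content in the same directory, and hard links come last so that their
+// targets are already recorded when header() looks them up.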
+
+// names provides a sorted list of the path names in the directory tree
+func (c *checkDirectory) names() []string {
+ names := make([]string, 0, len(c.file)+len(c.directory))
+ for name := range c.file {
+ names = append(names, name)
+ }
+ for name, subdirectory := range c.directory {
+ names = append(names, name+"/")
+ for _, subname := range subdirectory.names() {
+ names = append(names, name+"/"+subname)
+ }
+ }
+ return names
+}
+
+// compareCheckSubdirectory walks two subdirectory trees and returns a list of differences
+func compareCheckSubdirectory(path string, a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string {
+ var diff []string
+ if a == nil {
+ a = newCheckDirectoryDefaults()
+ }
+ if b == nil {
+ b = newCheckDirectoryDefaults()
+ }
+ for aname, adir := range a.directory {
+ if bdir, present := b.directory[aname]; !present {
+ // directory was removed
+ diff = append(diff, "-"+path+"/"+aname+"/")
+ diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, nil, idmap, ignore)...)
+ } else {
+ // directory is in both trees; descend
+ if attributes := compareFileInfo(adir.checkFileInfo, bdir.checkFileInfo, idmap, ignore); attributes != "" {
+ diff = append(diff, path+"/"+aname+"("+attributes+")")
+ }
+ diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, bdir, idmap, ignore)...)
+ }
+ }
+ for bname, bdir := range b.directory {
+ if _, present := a.directory[bname]; !present {
+ // directory added
+ diff = append(diff, "+"+path+"/"+bname+"/")
+ diff = append(diff, compareCheckSubdirectory(path+"/"+bname, nil, bdir, idmap, ignore)...)
+ }
+ }
+ for aname, afile := range a.file {
+ if bfile, present := b.file[aname]; !present {
+ // non-directory removed or replaced
+ diff = append(diff, "-"+path+"/"+aname)
+ } else {
+ // item is in both trees; compare
+ if attributes := compareFileInfo(afile, bfile, idmap, ignore); attributes != "" {
+ diff = append(diff, path+"/"+aname+"("+attributes+")")
+ }
+ }
+ }
+ for bname := range b.file {
+ filetype, present := a.file[bname]
+ if !present {
+ // non-directory added or replaced with something else
+ diff = append(diff, "+"+path+"/"+bname)
+ continue
+ }
+ if attributes := compareFileInfo(filetype, b.file[bname], idmap, ignore); attributes != "" {
+ // non-directory replaced with non-directory
+ diff = append(diff, "+"+path+"/"+bname+"("+attributes+")")
+ }
+ }
+ return diff
+}
+
+// compareCheckDirectory walks two directory trees and returns a sorted list of differences
+func compareCheckDirectory(a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string {
+ diff := compareCheckSubdirectory("", a, b, idmap, ignore)
+ sort.Slice(diff, func(i, j int) bool {
+ if strings.Compare(diff[i][1:], diff[j][1:]) < 0 {
+ return true
+ }
+ if diff[i][0] == '-' {
+ return true
+ }
+ return false
+ })
+ return diff
+}
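+
+// The returned slice uses a diff-like notation; for two hypothetical trees
+// it might read
+//
+//	-/etc/old.conf
+//	+/etc/new.conf
+//	/etc/kept.conf(mode:0644￫0600)
+//
+// where "-" marks removals, "+" marks additions, and a parenthesized
+// attribute list marks items present in both trees with differing metadata.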
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index b4f773f2b..143fde297 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -1,21 +1,39 @@
package storage
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
+ "slices"
"sync"
"time"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
+type containerLocations uint8
+
+// The backing store is split into two JSON files: the volatile one is
+// written without fsync(), so it is less robust to an unclean shutdown.
+const (
+ stableContainerLocation containerLocations = 1 << iota
+ volatileContainerLocation
+
+ numContainerLocationIndex = iota
+)
+
+func containerLocationFromIndex(index int) containerLocations {
+ return 1 << index
+}
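+
+// For instance, containerLocationFromIndex(0) == stableContainerLocation
+// (bit 0b01) and containerLocationFromIndex(1) == volatileContainerLocation
+// (bit 0b10), so a set of locations can be carried in one bit mask and
+// iterated by index.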
+
// A Container is a reference to a read-write layer with metadata.
type Container struct {
// ID is either one which was specified at create-time, or a random
@@ -65,26 +83,41 @@ type Container struct {
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
+
+ // volatileStore is true if the container is from the volatile json file
+ volatileStore bool `json:"-"`
}
-// ContainerStore provides bookkeeping for information about Containers.
-type ContainerStore interface {
- FileBasedStore
- MetadataStore
- ContainerBigDataStore
- FlaggableStore
+// rwContainerStore provides bookkeeping for information about Containers.
+type rwContainerStore interface {
+ metadataStore
+ containerBigDataStore
+ flaggableStore
+
+ // startWriting makes sure the store is fresh, and locks it for writing.
+ // If this succeeds, the caller MUST call stopWriting().
+ startWriting() error
+
+ // stopWriting releases locks obtained by startWriting.
+ stopWriting()
- // Create creates a container that has a specified ID (or generates a
+ // startReading makes sure the store is fresh, and locks it for reading.
+ // If this succeeds, the caller MUST call stopReading().
+ startReading() error
+
+ // stopReading releases locks obtained by startReading.
+ stopReading()
+
+ // create creates a container that has a specified ID (or generates a
// random one if an empty value is supplied) and optional names,
// based on the specified image, using the specified layer as its
// read-write layer.
// The maps in the container's options structure are recorded for the
// convenience of the caller, nothing more.
- Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+ create(id string, names []string, image, layer string, options *ContainerOptions) (*Container, error)
- // SetNames updates the list of names associated with the container
- // with the specified ID.
- SetNames(id string, names []string) error
+ // updateNames modifies names associated with a container based on (op, names).
+ updateNames(id string, names []string, op updateNameOperation) error
// Get retrieves information about a container given an ID or name.
Get(id string) (*Container, error)
@@ -104,57 +137,68 @@ type ContainerStore interface {
// Containers returns a slice enumerating the known containers.
Containers() ([]Container, error)
+
+ // Clean up unreferenced datadirs
+ GarbageCollect() error
}
type containerStore struct {
- lockfile Locker
- dir string
+ // The following fields are only set when constructing containerStore, and must never be modified afterwards.
+ // They are safe to access without any other locking.
+ lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process.
+ dir string
+ jsonPath [numContainerLocationIndex]string
+
+ inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+ // The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+ // Almost all users should use startReading() or startWriting().
+ lastWrite lockfile.LastWrite
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
byname map[string]*Container
- loadMut sync.Mutex
}
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
- Names: copyStringSlice(c.Names),
+ Names: copySlicePreferringNil(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
- BigDataNames: copyStringSlice(c.BigDataNames),
- BigDataSizes: copyStringInt64Map(c.BigDataSizes),
- BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+ BigDataNames: copySlicePreferringNil(c.BigDataNames),
+ BigDataSizes: copyMapPreferringNil(c.BigDataSizes),
+ BigDataDigests: copyMapPreferringNil(c.BigDataDigests),
Created: c.Created,
- UIDMap: copyIDMap(c.UIDMap),
- GIDMap: copyIDMap(c.GIDMap),
- Flags: copyStringInterfaceMap(c.Flags),
+ UIDMap: copySlicePreferringNil(c.UIDMap),
+ GIDMap: copySlicePreferringNil(c.GIDMap),
+ Flags: copyMapPreferringNil(c.Flags),
+ volatileStore: c.volatileStore,
}
}
func (c *Container) MountLabel() string {
- if label, ok := c.Flags["MountLabel"].(string); ok {
+ if label, ok := c.Flags[mountLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) ProcessLabel() string {
- if label, ok := c.Flags["ProcessLabel"].(string); ok {
+ if label, ok := c.Flags[processLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) MountOpts() []string {
- switch c.Flags["MountOpts"].(type) {
+ switch value := c.Flags[mountOptsFlag].(type) {
case []string:
- return c.Flags["MountOpts"].([]string)
+ return value
case []interface{}:
var mountOpts []string
- for _, v := range c.Flags["MountOpts"].([]interface{}) {
+ for _, v := range value {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}
@@ -165,6 +209,187 @@ func (c *Container) MountOpts() []string {
}
}
+// The caller must hold r.inProcessLock for reading.
+func containerLocation(c *Container) containerLocations {
+ if c.volatileStore {
+ return volatileContainerLocation
+ }
+ return stableContainerLocation
+}
+
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of containerStore construction, every other caller
+// should use startWriting() instead.
+func (r *containerStore) startWritingWithReload(canReload bool) error {
+ r.lockfile.Lock()
+ r.inProcessLock.Lock()
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+ }
+ }()
+
+ if canReload {
+ if _, err := r.reloadIfChanged(true); err != nil {
+ return err
+ }
+ }
+
+ succeeded = true
+ return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *containerStore) startWriting() error {
+ return r.startWritingWithReload(true)
+}
+
+// stopWriting releases locks obtained by startWriting.
+func (r *containerStore) stopWriting() {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *containerStore) startReading() error {
+ // inProcessLocked calls the nested function with r.inProcessLock held for writing.
+ inProcessLocked := func(fn func() error) error {
+ r.inProcessLock.Lock()
+ defer r.inProcessLock.Unlock()
+ return fn()
+ }
+
+ r.lockfile.RLock()
+ unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil.
+ defer func() {
+ if unlockFn != nil {
+ unlockFn()
+ }
+ }()
+ r.inProcessLock.RLock()
+ unlockFn = r.stopReading
+
+ // If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
+ _, modified, err := r.modified()
+ if err != nil {
+ return err
+ }
+ if modified {
+ // We are unlucky, and need to reload.
+ // NOTE: Multiple goroutines can get to this place approximately simultaneously.
+ r.inProcessLock.RUnlock()
+ unlockFn = r.lockfile.Unlock
+
+ // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally
+ // trigger a load below; we (lock and) reloadIfChanged() again.
+
+ // First try reloading with r.lockfile held for reading.
+ // r.inProcessLock will serialize all goroutines that got here;
+ // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
+ var tryLockedForWriting bool
+ if err := inProcessLocked(func() error {
+ // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile,
+ // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock.
+ var err error
+ tryLockedForWriting, err = r.reloadIfChanged(false)
+ return err
+ }); err != nil {
+ if !tryLockedForWriting {
+ return err
+ }
+ // Not good enough, we need r.lockfile held for writing. So, let’s do that.
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.Lock()
+ unlockFn = r.lockfile.Unlock
+ if err := inProcessLocked(func() error {
+ _, err := r.reloadIfChanged(true)
+ return err
+ }); err != nil {
+ return err
+ }
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.RLock()
+ unlockFn = r.lockfile.Unlock
+ // We need to check for a reload once more because the on-disk state could have been modified
+ // after we released the lock.
+ // If that, _again_, finds inconsistent state, just give up.
+ // We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
+ // shouldn’t be saved (by correct implementations) in the first place.
+ if err := inProcessLocked(func() error {
+ _, err := r.reloadIfChanged(false)
+ return err
+ }); err != nil {
+ return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+ }
+ }
+
+ // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
+ // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+ // protects us against in-process writers modifying data.
+ // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+ // and 2) access to the in-memory data is not racy;
+ // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+ r.inProcessLock.RLock()
+ }
+ unlockFn = nil
+ return nil
+}
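+
+// In short, startReading optimistically takes both read locks and, only if
+// the on-disk state changed, falls back to reloading: first under the read
+// lock, then retrying with the file lock held for writing when a read-locked
+// reload cannot repair the data. This double-checked pattern keeps the
+// common (unchanged) path cheap.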
+
+// stopReading releases locks obtained by startReading.
+func (r *containerStore) stopReading() {
+ r.inProcessLock.RUnlock()
+ r.lockfile.Unlock()
+}
+
+// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *containerStore) modified() (lockfile.LastWrite, bool, error) {
+ return r.lockfile.ModifiedSince(r.lastWrite)
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+ lastWrite, modified, err := r.modified()
+ if err != nil {
+ return false, err
+ }
+ // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
+ // and modify no fields, to ensure they see fresh data:
+ // r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
+ // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
+ // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
+ if modified {
+ if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+ return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+ }
+ r.lastWrite = lastWrite
+ }
+ return false, nil
+}
+
+// Requires startReading or startWriting.
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -173,8 +398,39 @@ func (r *containerStore) Containers() ([]Container, error) {
return containers, nil
}
-func (r *containerStore) containerspath() string {
- return filepath.Join(r.dir, "containers.json")
+// This looks for datadirs in the store directory that are not referenced
+// by the JSON files and removes them. These can appear after unclean
+// shutdowns, or after regular restarts in transient store mode.
+// Requires startReading.
+func (r *containerStore) GarbageCollect() error {
+ entries, err := os.ReadDir(r.dir)
+ if err != nil {
+ // Unexpected, don't try any GC
+ return err
+ }
+
+ for _, entry := range entries {
+ id := entry.Name()
+ // Does it look like a datadir directory?
+ if !entry.IsDir() || stringid.ValidateID(id) != nil {
+ continue
+ }
+
+ // Should the id be there?
+ if r.byid[id] != nil {
+ continue
+ }
+
+ // Otherwise remove datadir
+ logrus.Debugf("removing %q", filepath.Join(r.dir, id))
+ moreErr := os.RemoveAll(filepath.Join(r.dir, id))
+ // Propagate first error
+ if moreErr != nil && err == nil {
+ err = moreErr
+ }
+ }
+
+ return err
}
func (r *containerStore) datadir(id string) string {
@@ -185,84 +441,180 @@ func (r *containerStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
-func (r *containerStore) Load() error {
- needSave := false
- rpath := r.containerspath()
- data, err := ioutil.ReadFile(rpath)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *containerStore) load(lockedForWriting bool) (bool, error) {
+ var modifiedLocations containerLocations
containers := []*Container{}
- layers := make(map[string]*Container)
- idlist := []string{}
+
ids := make(map[string]*Container)
+
+ for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
+ location := containerLocationFromIndex(locationIndex)
+ rpath := r.jsonPath[locationIndex]
+
+ data, err := os.ReadFile(rpath)
+ if err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+
+ locationContainers := []*Container{}
+ if len(data) != 0 {
+ if err := json.Unmarshal(data, &locationContainers); err != nil {
+ return false, fmt.Errorf("loading %q: %w", rpath, err)
+ }
+ }
+
+ for _, container := range locationContainers {
+ // There should be no duplicate IDs between the JSON files, but let's check to be sure
+ if ids[container.ID] != nil {
+ continue // skip invalid duplicated container
+ }
+ // Remember where the container came from
+ if location == volatileContainerLocation {
+ container.volatileStore = true
+ }
+ containers = append(containers, container)
+ ids[container.ID] = container
+ }
+ }
+
+ idlist := make([]string, 0, len(containers))
+ layers := make(map[string]*Container)
names := make(map[string]*Container)
- if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
- idlist = make([]string, 0, len(containers))
- for n, container := range containers {
- idlist = append(idlist, container.ID)
- ids[container.ID] = containers[n]
- layers[container.LayerID] = containers[n]
- for _, name := range container.Names {
- if conflict, ok := names[name]; ok {
- r.removeName(conflict, name)
- needSave = true
- }
- names[name] = containers[n]
+ var errorToResolveBySaving error // == nil
+ for n, container := range containers {
+ idlist = append(idlist, container.ID)
+ layers[container.LayerID] = containers[n]
+ for _, name := range container.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
+ modifiedLocations |= containerLocation(container)
}
+ names[name] = containers[n]
}
}
+
r.containers = containers
- r.idindex = truncindex.NewTruncIndex(idlist)
+ r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.bylayer = layers
r.byname = names
- if needSave {
- return r.Save()
+ if errorToResolveBySaving != nil {
+ if !lockedForWriting {
+ return true, errorToResolveBySaving
+ }
+ return false, r.save(modifiedLocations)
}
- return nil
+ return false, nil
}
-func (r *containerStore) Save() error {
- if !r.Locked() {
- return errors.New("container store is not locked")
- }
- rpath := r.containerspath()
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
- return err
- }
- jdata, err := json.Marshal(&r.containers)
+// save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
+func (r *containerStore) save(saveLocations containerLocations) error {
+ r.lockfile.AssertLockedForWriting()
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
- defer r.Touch()
- return ioutils.AtomicWriteFile(rpath, jdata, 0600)
+ r.lastWrite = lw
+ for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
+ location := containerLocationFromIndex(locationIndex)
+ if location&saveLocations == 0 {
+ continue
+ }
+ rpath := r.jsonPath[locationIndex]
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
+ return err
+ }
+ subsetContainers := make([]*Container, 0, len(r.containers))
+ for _, container := range r.containers {
+ if containerLocation(container) == location {
+ subsetContainers = append(subsetContainers, container)
+ }
+ }
+
+ jdata, err := json.Marshal(&subsetContainers)
+ if err != nil {
+ return err
+ }
+ var opts *ioutils.AtomicFileWriterOptions
+ if location == volatileContainerLocation {
+ opts = &ioutils.AtomicFileWriterOptions{
+ NoSync: true,
+ }
+ }
+ if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// saveFor saves the contents of the store relevant for modifiedContainer to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
+func (r *containerStore) saveFor(modifiedContainer *Container) error {
+ return r.save(containerLocation(modifiedContainer))
}
-func newContainerStore(dir string) (ContainerStore, error) {
- if err := os.MkdirAll(dir, 0700); err != nil {
+func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
+ if err := os.MkdirAll(dir, 0o700); err != nil {
return nil, err
}
- lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock"))
+ volatileDir := dir
+ if transient {
+ if err := os.MkdirAll(runDir, 0o700); err != nil {
+ return nil, err
+ }
+ volatileDir = runDir
+ }
+ lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock"))
if err != nil {
return nil, err
}
- lockfile.Lock()
- defer lockfile.Unlock()
cstore := containerStore{
- lockfile: lockfile,
- dir: dir,
+ lockfile: lockfile,
+ dir: dir,
+ jsonPath: [numContainerLocationIndex]string{
+ filepath.Join(dir, "containers.json"),
+ filepath.Join(volatileDir, "volatile-containers.json"),
+ },
+
containers: []*Container{},
byid: make(map[string]*Container),
bylayer: make(map[string]*Container),
byname: make(map[string]*Container),
}
- if err := cstore.Load(); err != nil {
+
+ if err := cstore.startWritingWithReload(false); err != nil {
+ return nil, err
+ }
+ cstore.lastWrite, err = cstore.lockfile.GetLastWrite()
+ if err != nil {
+ return nil, err
+ }
+ defer cstore.stopWriting()
+ if _, err := cstore.load(true); err != nil {
return nil, err
}
return &cstore, nil
}
+// Requires startReading or startWriting.
func (r *containerStore) lookup(id string) (*Container, bool) {
if container, ok := r.byid[id]; ok {
return container, ok
@@ -278,15 +630,17 @@ func (r *containerStore) lookup(id string) (*Container, bool) {
return nil, false
}
+// Requires startWriting.
func (r *containerStore) ClearFlag(id string, flag string) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
delete(container.Flags, flag)
- return r.Save()
+ return r.saveFor(container)
}
+// Requires startWriting.
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
container, ok := r.lookup(id)
if !ok {
@@ -296,10 +650,14 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
- return r.Save()
+ return r.saveFor(container)
}
-func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
+// Requires startWriting.
+func (r *containerStore) create(id string, names []string, image, layer string, options *ContainerOptions) (container *Container, err error) {
+ if options == nil {
+ options = &ContainerOptions{}
+ }
if id == "" {
id = stringid.GenerateRandomID()
_, idInUse := r.byid[id]
@@ -311,47 +669,74 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
+ names = dedupeStrings(names)
+ for _, name := range names {
+ if _, nameInUse := r.byname[name]; nameInUse {
+ return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName)
+ }
+ }
+ if err := hasOverlappingRanges(options.UIDMap); err != nil {
+ return nil, err
+ }
+ if err := hasOverlappingRanges(options.GIDMap); err != nil {
+ return nil, err
+ }
+ container = &Container{
+ ID: id,
+ Names: names,
+ ImageID: image,
+ LayerID: layer,
+ Metadata: options.Metadata,
+ BigDataNames: []string{},
+ BigDataSizes: make(map[string]int64),
+ BigDataDigests: make(map[string]digest.Digest),
+ Created: time.Now().UTC(),
+ Flags: newMapFrom(options.Flags),
+ UIDMap: copySlicePreferringNil(options.UIDMap),
+ GIDMap: copySlicePreferringNil(options.GIDMap),
+ volatileStore: options.Volatile,
+ }
if options.MountOpts != nil {
- options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
}
if options.Volatile {
- options.Flags["Volatile"] = true
- }
- names = dedupeNames(names)
+ container.Flags[volatileFlag] = true
+ }
+ r.containers = append(r.containers, container)
+ // This can only fail on duplicate IDs, which shouldn’t happen — and in
+ // that case the index is already in the desired state anyway.
+ // Implementing recovery from an unlikely and unimportant failure here
+ // would be too risky.
+ _ = r.idindex.Add(id)
+ r.byid[id] = container
+ r.bylayer[layer] = container
for _, name := range names {
- if _, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName,
- fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
+ r.byname[name] = container
+ }
+ defer func() {
+ if err != nil {
+ // now that the in-memory structures know about the new
+ // record, we can use regular Delete() to clean up if
+ // anything breaks from here on out
+ if e := r.Delete(id); e != nil {
+ logrus.Debugf("while cleaning up partially-created container %q we failed to create: %v", id, e)
+ }
}
+ }()
+ err = r.saveFor(container)
+ if err != nil {
+ return nil, err
}
- if err == nil {
- container = &Container{
- ID: id,
- Names: names,
- ImageID: image,
- LayerID: layer,
- Metadata: metadata,
- BigDataNames: []string{},
- BigDataSizes: make(map[string]int64),
- BigDataDigests: make(map[string]digest.Digest),
- Created: time.Now().UTC(),
- Flags: copyStringInterfaceMap(options.Flags),
- UIDMap: copyIDMap(options.UIDMap),
- GIDMap: copyIDMap(options.GIDMap),
- }
- r.containers = append(r.containers, container)
- r.byid[id] = container
- r.idindex.Add(id)
- r.bylayer[layer] = container
- for _, name := range names {
- r.byname[name] = container
- }
- err = r.Save()
- container = copyContainer(container)
+ for _, item := range options.BigData {
+ if err = r.SetBigData(id, item.Key, item.Data); err != nil {
+ return nil, err
+ }
}
+ container = copyContainer(container)
return container, err
}
+// Requires startReading or startWriting.
func (r *containerStore) Metadata(id string) (string, error) {
if container, ok := r.lookup(id); ok {
return container.Metadata, nil
@@ -359,64 +744,63 @@ func (r *containerStore) Metadata(id string) (string, error) {
return "", ErrContainerUnknown
}
+// Requires startWriting.
func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.lookup(id); ok {
container.Metadata = metadata
- return r.Save()
+ return r.saveFor(container)
}
return ErrContainerUnknown
}
+// The caller must hold r.inProcessLock for writing.
func (r *containerStore) removeName(container *Container, name string) {
container.Names = stringSliceWithoutValue(container.Names, name)
}
-func (r *containerStore) SetNames(id string, names []string) error {
- names = dedupeNames(names)
- if container, ok := r.lookup(id); ok {
- for _, name := range container.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherContainer, ok := r.byname[name]; ok {
- r.removeName(otherContainer, name)
- }
- r.byname[name] = container
+// Requires startWriting.
+func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
+ container, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ oldNames := container.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherContainer, ok := r.byname[name]; ok {
+ r.removeName(otherContainer, name)
}
- container.Names = names
- return r.Save()
+ r.byname[name] = container
}
- return ErrContainerUnknown
+ container.Names = names
+ return r.saveFor(container)
}
+// Requires startWriting.
func (r *containerStore) Delete(id string) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
id = container.ID
- toDeleteIndex := -1
- for i, candidate := range r.containers {
- if candidate.ID == id {
- toDeleteIndex = i
- break
- }
- }
delete(r.byid, id)
- r.idindex.Delete(id)
+ // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+ // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
+ _ = r.idindex.Delete(id)
delete(r.bylayer, container.LayerID)
for _, name := range container.Names {
delete(r.byname, name)
}
- if toDeleteIndex != -1 {
- // delete the container at toDeleteIndex
- if toDeleteIndex == len(r.containers)-1 {
- r.containers = r.containers[:len(r.containers)-1]
- } else {
- r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
- }
- }
- if err := r.Save(); err != nil {
+ r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
+ return candidate.ID == id
+ })
+ if err := r.saveFor(container); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
@@ -425,6 +809,7 @@ func (r *containerStore) Delete(id string) error {
return nil
}
+// Requires startReading or startWriting.
func (r *containerStore) Get(id string) (*Container, error) {
if container, ok := r.lookup(id); ok {
return copyContainer(container), nil
@@ -432,6 +817,7 @@ func (r *containerStore) Get(id string) (*Container, error) {
return nil, ErrContainerUnknown
}
+// Requires startReading or startWriting.
func (r *containerStore) Lookup(name string) (id string, err error) {
if container, ok := r.lookup(name); ok {
return container.ID, nil
@@ -439,34 +825,34 @@ func (r *containerStore) Lookup(name string) (id string, err error) {
return "", ErrContainerUnknown
}
+// Requires startReading or startWriting.
func (r *containerStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
+// Requires startReading or startWriting.
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
- return ioutil.ReadFile(r.datapath(c.ID, key))
+ return os.ReadFile(r.datapath(c.ID, key))
}
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
- if c.BigDataSizes == nil {
- c.BigDataSizes = make(map[string]int64)
- }
- if size, ok := c.BigDataSizes[key]; ok {
+ if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -485,18 +871,16 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
- if c.BigDataDigests == nil {
- c.BigDataDigests = make(map[string]digest.Digest)
- }
- if d, ok := c.BigDataDigests[key]; ok {
+ if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -515,26 +899,28 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
return "", ErrDigestUnknown
}
+// Requires startReading or startWriting.
func (r *containerStore) BigDataNames(id string) ([]string, error) {
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
- return copyStringSlice(c.BigDataNames), nil
+ return copySlicePreferringNil(c.BigDataNames), nil
}
+// Requires startWriting.
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+ return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
- if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil {
+ if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil {
return err
}
- err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
+ err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600)
if err == nil {
save := false
if c.BigDataSizes == nil {
@@ -551,24 +937,18 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
- addName := true
- for _, name := range c.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(c.BigDataNames, key) {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
if save {
- err = r.Save()
+ err = r.saveFor(c)
}
}
return err
}
+// Requires startWriting.
func (r *containerStore) Wipe() error {
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
@@ -581,50 +961,3 @@ func (r *containerStore) Wipe() error {
}
return nil
}
-
-func (r *containerStore) Lock() {
- r.lockfile.Lock()
-}
-
-func (r *containerStore) RecursiveLock() {
- r.lockfile.RecursiveLock()
-}
-
-func (r *containerStore) RLock() {
- r.lockfile.RLock()
-}
-
-func (r *containerStore) Unlock() {
- r.lockfile.Unlock()
-}
-
-func (r *containerStore) Touch() error {
- return r.lockfile.Touch()
-}
-
-func (r *containerStore) Modified() (bool, error) {
- return r.lockfile.Modified()
-}
-
-func (r *containerStore) IsReadWrite() bool {
- return r.lockfile.IsReadWrite()
-}
-
-func (r *containerStore) TouchedSince(when time.Time) bool {
- return r.lockfile.TouchedSince(when)
-}
-
-func (r *containerStore) Locked() bool {
- return r.lockfile.Locked()
-}
-
-func (r *containerStore) ReloadIfChanged() error {
- r.loadMut.Lock()
- defer r.loadMut.Unlock()
-
- modified, err := r.Modified()
- if err == nil && modified {
- return r.Load()
- }
- return err
-}
diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go
new file mode 100644
index 000000000..76ae6328b
--- /dev/null
+++ b/vendor/github.com/containers/storage/deprecated.go
@@ -0,0 +1,214 @@
+package storage
+
+import (
+ "io"
+ "time"
+
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// The type definitions in this file exist ONLY to maintain formal API compatibility.
+// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES.
+
+// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROFileBasedStore interface {
+ Locker
+ Load() error
+ ReloadIfChanged() error
+}
+
+// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type RWFileBasedStore interface {
+ Save() error
+}
+
+// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type FileBasedStore interface {
+ ROFileBasedStore
+ RWFileBasedStore
+}
+
+// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROMetadataStore interface {
+ Metadata(id string) (string, error)
+}
+
+// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type RWMetadataStore interface {
+ SetMetadata(id, metadata string) error
+}
+
+// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type MetadataStore interface {
+ ROMetadataStore
+ RWMetadataStore
+}
+
+// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROBigDataStore interface {
+ BigData(id, key string) ([]byte, error)
+ BigDataSize(id, key string) (int64, error)
+ BigDataDigest(id, key string) (digest.Digest, error)
+ BigDataNames(id string) ([]string, error)
+}
+
+// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type RWImageBigDataStore interface {
+ SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+}
+
+// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ContainerBigDataStore interface {
+ ROBigDataStore
+ SetBigData(id, key string, data []byte) error
+}
+
+// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROLayerBigDataStore interface {
+ BigData(id, key string) (io.ReadCloser, error)
+ BigDataNames(id string) ([]string, error)
+}
+
+// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type RWLayerBigDataStore interface {
+ SetBigData(id, key string, data io.Reader) error
+}
+
+// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type LayerBigDataStore interface {
+ ROLayerBigDataStore
+ RWLayerBigDataStore
+}
+
+// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type FlaggableStore interface {
+ ClearFlag(id string, flag string) error
+ SetFlag(id string, flag string, value interface{}) error
+}
+
+// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ContainerStore interface {
+ FileBasedStore
+ MetadataStore
+ ContainerBigDataStore
+ FlaggableStore
+ Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+ SetNames(id string, names []string) error
+ AddNames(id string, names []string) error
+ RemoveNames(id string, names []string) error
+ Get(id string) (*Container, error)
+ Exists(id string) bool
+ Delete(id string) error
+ Wipe() error
+ Lookup(name string) (string, error)
+ Containers() ([]Container, error)
+}
+
+// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROImageStore interface {
+ ROFileBasedStore
+ ROMetadataStore
+ ROBigDataStore
+ Exists(id string) bool
+ Get(id string) (*Image, error)
+ Lookup(name string) (string, error)
+ Images() ([]Image, error)
+ ByDigest(d digest.Digest) ([]*Image, error)
+}
+
+// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ImageStore interface {
+ ROImageStore
+ RWFileBasedStore
+ RWMetadataStore
+ RWImageBigDataStore
+ FlaggableStore
+ Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
+ SetNames(id string, names []string) error
+ AddNames(id string, names []string) error
+ RemoveNames(id string, names []string) error
+ Delete(id string) error
+ Wipe() error
+}
+
+// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type ROLayerStore interface {
+ ROFileBasedStore
+ ROMetadataStore
+ ROLayerBigDataStore
+ Exists(id string) bool
+ Get(id string) (*Layer, error)
+ Status() ([][2]string, error)
+ Changes(from, to string) ([]archive.Change, error)
+ Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+ DiffSize(from, to string) (int64, error)
+ Size(name string) (int64, error)
+ Lookup(name string) (string, error)
+ LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+ LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+ Layers() ([]Layer, error)
+}
+
+// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
+//
+// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
+type LayerStore interface {
+ ROLayerStore
+ RWFileBasedStore
+ RWMetadataStore
+ FlaggableStore
+ RWLayerBigDataStore
+ Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
+ CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
+ Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
+ SetNames(id string, names []string) error
+ AddNames(id string, names []string) error
+ RemoveNames(id string, names []string) error
+ Delete(id string) error
+ Wipe() error
+ Mount(id string, options drivers.MountOpts) (string, error)
+ Unmount(id string, force bool) (bool, error)
+ Mounted(id string) (int, error)
+ ParentOwners(id string) (uids, gids []int, err error)
+ ApplyDiff(to string, diff io.Reader) (int64, error)
+ ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+ DifferTarget(id string) (string, error)
+ LoadLocked() error
+ PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
+}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index afbd23a75..a8312811b 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
/*
@@ -24,11 +24,11 @@ package aufs
import (
"bufio"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
- "os/exec"
"path"
"path/filepath"
"strings"
@@ -39,14 +39,14 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
@@ -54,27 +54,25 @@ import (
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
- ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+ ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems")
// ErrAufsNested means aufs cannot be used bc we are in a user namespace
- ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+ ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace")
backingFs = ""
enableDirpermLock sync.Once
enableDirperm bool
)
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
- graphdriver.Register("aufs", Init)
+ graphdriver.MustRegister("aufs", Init)
}
// Driver contains information about the filesystem mounted.
type Driver struct {
sync.Mutex
root string
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
@@ -86,11 +84,9 @@ type Driver struct {
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
-
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
- return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
-
+ return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
}
fsMagic, err := graphdriver.GetFSMagic(home)
@@ -104,7 +100,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
+ return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
var mountOptions string
@@ -129,22 +125,16 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
a := &Driver{
root: home,
- uidMaps: options.UIDMaps,
- gidMaps: options.GIDMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
- if err != nil {
- return nil, err
- }
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
- if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+ if err := os.MkdirAll(home, 0o700); err != nil {
if os.IsExist(err) {
return a, nil
}
@@ -157,7 +147,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Populate the dir structure
for _, p := range paths {
- if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil {
+ if err := os.MkdirAll(path.Join(home, p), 0o700); err != nil {
return nil, err
}
}
@@ -168,7 +158,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(home, path)
- entries, err := ioutil.ReadDir(p)
+ entries, err := os.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
@@ -191,14 +181,8 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Return a nil error if the kernel supports aufs
-// We cannot modprobe because inside dind modprobe fails
-// to run
func supportsAufs() error {
- // We can try to modprobe aufs first before looking at
- // proc/filesystems for when aufs is supported
- exec.Command("modprobe", "aufs").Run()
-
- if rsystem.RunningInUserNS() {
+ if unshare.IsRootless() {
return ErrAufsNested
}
@@ -238,18 +222,35 @@ func (a *Driver) Status() [][2]string {
// Metadata not implemented
func (a *Driver) Metadata(id string) (map[string]string, error) {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
// Exists returns true if the given id is registered with
// this driver
func (a *Driver) Exists(id string) bool {
- if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
+ if err := fileutils.Lexists(path.Join(a.rootPath(), "layers", id)); err != nil {
return false
}
return true
}
+// ListLayers returns all of the layers known to the driver.
+func (a *Driver) ListLayers() ([]string, error) {
+ diffsDir := filepath.Join(a.rootPath(), "diff")
+ entries, err := os.ReadDir(diffsDir)
+ if err != nil {
+ return nil, err
+ }
+ results := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ continue
+ }
+ results = append(results, entry.Name())
+ }
+ return results, nil
+}
+
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
return nil
@@ -272,7 +273,6 @@ func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
// Create three folders for each id
// mnt, layers, and diff
func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
-
if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs")
}
@@ -318,7 +318,7 @@ func (a *Driver) createDirsFor(id, parent string) error {
// The path of directories are <aufs_root_path>/mnt/<image_id>
// and <aufs_root_path>/diff/<image_id>
for _, p := range paths {
- rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
+ rootPair := idtools.IDPair{UID: 0, GID: 0}
rootPerms := defaultPerms
if parent != "" {
st, err := system.Stat(path.Join(a.rootPath(), p, parent))
@@ -339,7 +339,9 @@ func (a *Driver) createDirsFor(id, parent string) error {
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.locker.Lock(id)
- defer a.locker.Unlock(id)
+ defer func() {
+ _ = a.locker.Unlock(id)
+ }()
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
@@ -372,10 +374,10 @@ func (a *Driver) Remove(id string) error {
}
if err != unix.EBUSY {
- return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
}
if retries >= 5 {
- return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
@@ -385,21 +387,21 @@ func (a *Driver) Remove(id string) error {
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing layers dir for %s", id)
+ return fmt.Errorf("removing layers dir for %s: %w", id, err)
}
if err := atomicRemove(a.getDiffPath(id)); err != nil {
- return errors.Wrapf(err, "could not remove diff path for id %s", id)
+ return fmt.Errorf("could not remove diff path for id %s: %w", id, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that container runtime doesn't find it anymore) before doing removal of
// the whole tree.
if err := atomicRemove(mountpoint); err != nil {
- if errors.Cause(err) == unix.EBUSY {
+ if errors.Is(err, unix.EBUSY) {
logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
}
- return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
+ return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err)
}
a.pathCacheLock.Lock()
@@ -416,11 +418,11 @@ func atomicRemove(source string) error {
case err == nil, os.IsNotExist(err):
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
- if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
+ if e := fileutils.Exists(source); !os.IsNotExist(e) {
+ return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
}
default:
- return errors.Wrapf(err, "error preparing atomic delete")
+ return fmt.Errorf("preparing atomic delete: %w", err)
}
return system.EnsureRemoveAll(target)
@@ -430,7 +432,10 @@ func atomicRemove(source string) error {
// This will mount the dir at its given path
func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
a.locker.Lock(id)
- defer a.locker.Unlock(id)
+ defer func() {
+ _ = a.locker.Unlock(id)
+ }()
+
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
@@ -467,7 +472,10 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
a.locker.Lock(id)
- defer a.locker.Unlock(id)
+ defer func() {
+ _ = a.locker.Unlock(id)
+ }()
+
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
@@ -490,7 +498,9 @@ func (a *Driver) Put(id string) error {
// For AUFS, it queries the mountpoint for this ID.
func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
a.locker.Lock(id)
- defer a.locker.Unlock(id)
+ defer func() {
+ _ = a.locker.Unlock(id)
+ }()
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
@@ -624,7 +634,7 @@ func (a *Driver) mount(id string, target string, layers []string, options graphd
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, options); err != nil {
- return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+ return fmt.Errorf("creating aufs mount to %s: %w", target, err)
}
return nil
}
@@ -649,11 +659,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) {
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
- if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+ if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
- if !info.IsDir() {
+ if !d.IsDir() {
return nil
}
dirs = append(dirs, path)
@@ -673,7 +683,9 @@ func (a *Driver) Cleanup() error {
func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
- Unmount(target)
+ if err1 := Unmount(target); err1 != nil {
+ logrus.Warnf("Unmount %q: %v", target, err1)
+ }
}
}()
@@ -728,16 +740,16 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M
// version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
- base, err := ioutil.TempDir("", "storage-aufs-base")
+ base, err := os.MkdirTemp("", "storage-aufs-base")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
- union, err := ioutil.TempDir("", "storage-aufs-union")
+ union, err := os.MkdirTemp("", "storage-aufs-union")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
@@ -748,7 +760,7 @@ func useDirperm() bool {
}
enableDirperm = true
if err := Unmount(union); err != nil {
- logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
+ logrus.Errorf("Checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
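A recurring change throughout this file (and the rest of the vendored tree) is the move from github.com/pkg/errors to standard-library error wrapping: errors.Wrapf(err, "msg") becomes fmt.Errorf("msg: %w", err), and errors.Cause(err) == target comparisons become errors.Is(err, target), which also matches through multiple layers of wrapping. A minimal sketch of the equivalence, with an illustrative sentinel error:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNotSupported = errors.New("not supported")

    func main() {
    	// Old style (github.com/pkg/errors):
    	//   errors.Wrapf(errNotSupported, "kernel does not support aufs")
    	// New style, as in this diff: wrap with fmt.Errorf and the %w verb.
    	err := fmt.Errorf("kernel does not support aufs: %w", errNotSupported)

    	// errors.Is replaces errors.Cause comparisons.
    	fmt.Println(errors.Is(err, errNotSupported)) // true

    	// errors.Unwrap recovers the wrapped sentinel directly.
    	fmt.Println(errors.Unwrap(err) == errNotSupported) // true
    }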
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
index d2325fc46..9587bf63c 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -1,17 +1,16 @@
-// +build linux
+//go:build linux
package aufs
import (
"bufio"
- "io/ioutil"
"os"
"path"
)
// Return all the directories
func loadIds(root string) ([]string, error) {
- dirs, err := ioutil.ReadDir(root)
+ dirs, err := os.ReadDir(root)
if err != nil {
return nil, err
}
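loadIds here follows another mechanical migration in this diff: the deprecated io/ioutil helpers are replaced by their Go 1.16 equivalents (ioutil.ReadDir -> os.ReadDir, ioutil.TempDir -> os.MkdirTemp, ioutil.ReadFile/WriteFile -> os.ReadFile/WriteFile). The subtle part is that os.ReadDir returns lightweight fs.DirEntry values rather than fully stat()ed fs.FileInfo. A small sketch:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// ioutil.ReadDir returned []fs.FileInfo and stat()ed every entry;
    	// os.ReadDir (Go 1.16) returns the cheaper []fs.DirEntry instead.
    	entries, err := os.ReadDir(".")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	for _, e := range entries {
    		// Call e.Info() only if full stat data is actually needed.
    		fmt.Println(e.Name(), e.IsDir())
    	}
    }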
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go
index 100e7537a..51b3d6dfa 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/mount.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package aufs
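Each file in this series also converts the old // +build constraint lines to the //go:build syntax introduced in Go 1.17, which spells the boolean logic out explicitly. In the old syntax a comma means AND and a space means OR; during the transition, gofmt keeps the two forms in sync on adjacent lines. A sketch of the pairing (package name is illustrative):

    //go:build linux && cgo
    // +build linux,cgo

    package example

    // In the old syntax a space is OR and a comma is AND, so a constraint
    // like the one in btrfs/version_none.go read:
    //   // +build !linux btrfs_noversion !cgo  ->  !linux || btrfs_noversion || !cgo
    // (this diff additionally tightens that particular file to
    // linux && btrfs_noversion && cgo, a deliberate change rather than a
    // mechanical translation).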
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
deleted file mode 100644
index d030b0663..000000000
--- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
- return errors.New("mount is not implemented on this platform")
-}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 3903b1ddd..c7f37d97e 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -1,10 +1,13 @@
-// +build linux,cgo
+//go:build linux && cgo
package btrfs
/*
#include <stdlib.h>
#include <dirent.h>
+
+// keep struct field name compatible with btrfs-progs < 6.1.
+#define max_referenced max_rfer
#include <btrfs/ioctl.h>
#include <btrfs/ctree.h>
@@ -16,7 +19,7 @@ import "C"
import (
"fmt"
- "io/ioutil"
+ "io/fs"
"math"
"os"
"path"
@@ -28,21 +31,21 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
- graphdriver.Register("btrfs", Init)
+ graphdriver.MustRegister("btrfs", Init)
}
type btrfsOptions struct {
@@ -53,21 +56,16 @@ type btrfsOptions struct {
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
-
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsMagic != graphdriver.FsMagicBtrfs {
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+ return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
- if err != nil {
- return nil, err
- }
- if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+ if err := os.MkdirAll(filepath.Join(home, "subvolumes"), 0o700); err != nil {
return nil, err
}
@@ -82,8 +80,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
driver := &Driver{
home: home,
- uidMaps: options.UIDMaps,
- gidMaps: options.GIDMaps,
options: opt,
}
@@ -116,7 +112,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
- return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+ return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option)
}
}
return options, userDiskQuota, nil
@@ -124,10 +120,8 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
// Driver contains information about the filesystem mounted.
type Driver struct {
- //root of the file system
+ // root of the file system
home string
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
options btrfsOptions
quotaEnabled bool
once sync.Once
@@ -154,7 +148,7 @@ func (d *Driver) Status() [][2]string {
// Metadata returns empty metadata for this driver.
func (d *Driver) Metadata(id string) (map[string]string, error) {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
// Cleanup unmounts the home directory.
@@ -172,7 +166,7 @@ func openDir(path string) (*C.DIR, error) {
dir := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
+ return nil, fmt.Errorf("can't open dir %s", path)
}
return dir, nil
}
@@ -202,7 +196,7 @@ func subvolCreate(path, name string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs subvolume: %w", errno)
}
return nil
}
@@ -223,14 +217,14 @@ func subvolSnapshot(src, dest, name string) error {
var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
- var cs = C.CString(name)
+ cs := C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs snapshot: %w", errno)
}
return nil
}
@@ -256,32 +250,32 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
var args C.struct_btrfs_ioctl_vol_args
// walk the btrfs subvolumes
- walkSubvolumes := func(p string, f os.FileInfo, err error) error {
+ walkSubvolumes := func(p string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) && p != fullPath {
// missing most likely because the path was a subvolume that got removed in the previous iteration
// since it's gone anyway, we don't care
return nil
}
- return fmt.Errorf("error walking subvolumes: %v", err)
+ return fmt.Errorf("walking subvolumes: %w", err)
}
// we want to check children only so skip itself
// it will be removed after the filepath walk anyways
- if f.IsDir() && p != fullPath {
+ if d.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
- return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
+ return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err)
}
if sv {
- if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil {
- return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
+ if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil {
+ return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err)
}
}
}
return nil
}
- if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil {
- return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
+ if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil {
+ return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err)
}
if quotaEnabled {
@@ -307,7 +301,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
+ return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno)
}
return nil
}
@@ -343,7 +337,7 @@ func (d *Driver) enableQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno)
}
d.quotaEnabled = true
@@ -368,7 +362,7 @@ func (d *Driver) subvolRescanQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno)
}
return nil
@@ -382,12 +376,12 @@ func subvolLimitQgroup(path string, size uint64) error {
defer closeDir(dir)
var args C.struct_btrfs_ioctl_qgroup_limit_args
- args.lim.max_referenced = C.__u64(size)
+ args.lim.max_rfer = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
}
return nil
@@ -416,11 +410,11 @@ func qgroupStatus(path string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
+ return fmt.Errorf("failed to search qgroup for %s: %w", path, errno)
}
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
- return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
+ return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type)
}
return nil
}
@@ -438,10 +432,10 @@ func subvolLookupQgroup(path string) (uint64, error) {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
+ return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno)
}
if args.treeid == 0 {
- return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
+ return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir)
}
return uint64(args.treeid), nil
@@ -476,13 +470,9 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
// Create the filesystem with given id.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
- quotas := path.Join(d.home, "quotas")
- subvolumes := path.Join(d.home, "subvolumes")
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return err
- }
- if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
+ quotas := d.quotasDir()
+ subvolumes := d.subvolumesDir()
+ if err := os.MkdirAll(subvolumes, 0o700); err != nil {
return err
}
if parent == "" {
@@ -520,18 +510,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
- if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
- return err
- }
- if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+ if err := os.MkdirAll(quotas, 0o700); err != nil {
return err
}
- }
-
- // if we have a remapped root (user namespaces enabled), change the created snapshot
- // dir ownership to match
- if rootUID != 0 || rootGID != 0 {
- if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
+ if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
return err
}
}
@@ -557,7 +539,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.size = uint64(size)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s (%q)", key, storageOpt)
}
}
@@ -587,11 +569,11 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error {
// Remove the filesystem with given id.
func (d *Driver) Remove(id string) error {
dir := d.subvolumesDirID(id)
- if _, err := os.Stat(dir); err != nil {
+ if err := fileutils.Exists(dir); err != nil {
return err
}
quotasDir := d.quotasDirID(id)
- if _, err := os.Stat(quotasDir); err == nil {
+ if err := fileutils.Exists(quotasDir); err == nil {
if err := os.Remove(quotasDir); err != nil {
return err
}
@@ -626,23 +608,18 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
- switch len(options.Options) {
- case 0:
- case 1:
- if options.Options[0] == "ro" {
+ for _, opt := range options.Options {
+ if opt == "ro" {
// ignore "ro" option
- break
+ continue
}
- fallthrough
- default:
return "", fmt.Errorf("btrfs driver does not support mount options")
}
-
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
- if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+ if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.enableQuota(); err != nil {
return "", err
@@ -672,10 +649,26 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
// Exists checks if the id exists in the filesystem.
func (d *Driver) Exists(id string) bool {
dir := d.subvolumesDirID(id)
- _, err := os.Stat(dir)
+ err := fileutils.Exists(dir)
return err == nil
}
+// ListLayers returns all of the layers known to the driver.
+func (d *Driver) ListLayers() ([]string, error) {
+ entries, err := os.ReadDir(d.subvolumesDir())
+ if err != nil {
+ return nil, err
+ }
+ results := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ continue
+ }
+ results = append(results, entry.Name())
+ }
+ return results, nil
+}
+
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
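Note the errno handling pattern in the btrfs hunks above: unix.Errno values returned by raw ioctl syscalls implement the error interface, so the diff wraps them with %w instead of flattening them to strings via errno.Error(), which keeps them matchable later. A sketch, using unix.ENOTTY as a stand-in for a failed BTRFS_IOC_* ioctl:

    package main

    import (
    	"errors"
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// A unix.Errno satisfies the error interface, so it can be
    	// wrapped with %w ...
    	errno := unix.ENOTTY // stand-in for a real ioctl failure
    	err := fmt.Errorf("failed to create btrfs subvolume: %w", errno)

    	// ... and matched with errors.Is, which the old
    	// errno.Error() string formatting made impossible.
    	fmt.Println(errors.Is(err, unix.ENOTTY)) // true
    }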
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
index f07088887..a4d77eaad 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -1,3 +1,3 @@
-// +build !linux !cgo
+//go:build !linux || !cgo
package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
index edd8bdab8..4f5d8a5b9 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -1,4 +1,4 @@
-// +build linux,!btrfs_noversion,cgo
+//go:build linux && !btrfs_noversion && cgo
package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
index 905e834e3..58c1b0d0c 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -1,4 +1,4 @@
-// +build !linux btrfs_noversion !cgo
+//go:build linux && btrfs_noversion && cgo
package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index 63bfd2d13..d728e919b 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -2,12 +2,14 @@ package graphdriver
import (
"bytes"
+ "errors"
"fmt"
+ "io/fs"
"os"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
- "github.com/opencontainers/selinux/pkg/pwalk"
+ "github.com/opencontainers/selinux/pkg/pwalkdir"
)
const (
@@ -50,13 +52,17 @@ func chownByMapsMain() {
if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 {
toHost = nil
}
- chown := func(path string, info os.FileInfo, _ error) error {
- if path == "." {
+
+ chowner := newLChowner()
+
+ var chown fs.WalkDirFunc = func(path string, d fs.DirEntry, _ error) error {
+ info, err := d.Info()
+ if path == "." || err != nil {
return nil
}
- return platformLChown(path, info, toHost, toContainer)
+ return chowner.LChown(path, info, toHost, toContainer)
}
- if err := pwalk.Walk(".", chown); err != nil {
+ if err := pwalkdir.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
os.Exit(1)
}
@@ -84,13 +90,13 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error
cmd.Stdin = bytes.NewReader(config)
output, err := cmd.CombinedOutput()
if len(output) > 0 && err != nil {
- return fmt.Errorf("%v: %s", err, string(output))
+ return fmt.Errorf("%s: %w", string(output), err)
}
if err != nil {
return err
}
if len(output) > 0 {
- return fmt.Errorf("%s", string(output))
+ return errors.New(string(output))
}
return nil
@@ -111,7 +117,7 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
// on-disk owner UIDs and GIDs which are "host" values in the first map with
// UIDs and GIDs for "host" values from the second map which correspond to the
// same "container" IDs.
-func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) {
driver := n.ProtoDriver
options := MountOpts{
MountLabel: mountLabel,
@@ -120,9 +126,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost
if err != nil {
return err
}
- defer func() {
- driver.Put(id)
- }()
+ defer driverPut(driver, id, &retErr)
return ChownPathByMaps(layerFs, toContainer, toHost)
}
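chownByMapsMain above switches from pwalk (FileInfo-based) to pwalkdir (DirEntry-based). The benefit is the same one filepath.WalkDir brings in the standard library, sketched below: the walk no longer has to stat every entry eagerly, and d.Info() is called lazily only where the full metadata is actually required:

    package main

    import (
    	"fmt"
    	"io/fs"
    	"os"
    	"path/filepath"
    )

    func main() {
    	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
    		if err != nil {
    			return err
    		}
    		// Fetch full metadata only when needed, mirroring the
    		// pwalk -> pwalkdir switch above.
    		info, err := d.Info()
    		if err != nil {
    			return nil // entry vanished mid-walk; skip it
    		}
    		fmt.Println(path, info.Mode())
    		return nil
    	})
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }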
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
new file mode 100644
index 000000000..882fa04bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -0,0 +1,108 @@
+//go:build darwin
+
+package graphdriver
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+)
+
+type inode struct {
+ Dev uint64
+ Ino uint64
+}
+
+type platformChowner struct {
+ mutex sync.Mutex
+ inodes map[inode]bool
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{
+ inodes: make(map[inode]bool),
+ }
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+ st, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ i := inode{
+ Dev: uint64(st.Dev),
+ Ino: uint64(st.Ino),
+ }
+ c.mutex.Lock()
+ _, found := c.inodes[i]
+ if !found {
+ c.inodes[i] = true
+ }
+ c.mutex.Unlock()
+
+ if found {
+ return nil
+ }
+
+ // Map an on-disk UID/GID pair from host to container
+ // using the first map, then back to the host using the
+ // second map. Skip that first step if they're 0, to
+ // compensate for cases where a parent layer should
+ // have had a mapped value, but didn't.
+ uid, gid := int(st.Uid), int(st.Gid)
+ if toContainer != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedUID, mappedGID, err := toContainer.ToContainer(pair)
+ if err != nil {
+ if (uid != 0) || (gid != 0) {
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
+ }
+ mappedUID, mappedGID = uid, gid
+ }
+ uid, gid = mappedUID, mappedGID
+ }
+ if toHost != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedPair, err := toHost.ToHostOverflow(pair)
+ if err != nil {
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
+ }
+ uid, gid = mappedPair.UID, mappedPair.GID
+ }
+ if uid != int(st.Uid) || gid != int(st.Gid) {
+ capability, err := system.Lgetxattr(path, "security.capability")
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+
+ // Make the change.
+ if err := system.Lchown(path, uid, gid); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ // Restore the SUID and SGID bits if they were originally set.
+ if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+ if err := system.Chmod(path, info.Mode()); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ }
+ if capability != nil {
+ if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ }
+
+ }
+ return nil
+}
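The comment block in LChown describes a two-step translation: map the on-disk owner from host IDs into the container's ID space with one mapping, then back out to host IDs with a second mapping. The sketch below implements that round trip over plain lookup tables; the idMap type and the sample ranges are illustrative stand-ins, not the real idtools API:

    package main

    import "fmt"

    // idMap mirrors the shape of an ID-mapping entry: a contiguous
    // range of container IDs mapped onto host IDs.
    type idMap struct{ containerID, hostID, size int }

    func toContainer(maps []idMap, hostID int) (int, bool) {
    	for _, m := range maps {
    		if hostID >= m.hostID && hostID < m.hostID+m.size {
    			return m.containerID + (hostID - m.hostID), true
    		}
    	}
    	return 0, false
    }

    func toHost(maps []idMap, containerID int) (int, bool) {
    	for _, m := range maps {
    		if containerID >= m.containerID && containerID < m.containerID+m.size {
    			return m.hostID + (containerID - m.containerID), true
    		}
    	}
    	return 0, false
    }

    func main() {
    	oldMaps := []idMap{{containerID: 0, hostID: 100000, size: 65536}}
    	newMaps := []idMap{{containerID: 0, hostID: 200000, size: 65536}}

    	onDiskUID := 100001 // owned by container UID 1 under the old mapping
    	cuid, ok := toContainer(oldMaps, onDiskUID)
    	if !ok {
    		cuid = onDiskUID // loosely mirrors the diff's fallback for unmapped 0 IDs
    	}
    	huid, _ := toHost(newMaps, cuid)
    	fmt.Println(huid) // 200001: same container identity, new host ID
    }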
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 0387adfc1..808f4fea6 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows && !darwin
package graphdriver
@@ -6,17 +6,68 @@ import (
"errors"
"fmt"
"os"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
)
-func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+type inode struct {
+ Dev uint64
+ Ino uint64
+}
+
+type platformChowner struct {
+ mutex sync.Mutex
+ inodes map[inode]string
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{
+ inodes: make(map[inode]string),
+ }
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
+
+ i := inode{
+ Dev: uint64(st.Dev),
+ Ino: uint64(st.Ino),
+ }
+
+ c.mutex.Lock()
+
+ oldTarget, found := c.inodes[i]
+ if !found {
+ c.inodes[i] = path
+ }
+
+ // If we are dealing with a file with multiple links then keep the lock until the file is
+ // chowned to avoid a race where we link to the old version if the file is copied up.
+ if found || st.Nlink > 1 {
+ defer c.mutex.Unlock()
+ } else {
+ c.mutex.Unlock()
+ }
+
+ if found {
+ // If the dev/inode was already chowned then create a link to the old target instead
+ // of chowning it again. This is necessary when the underlying file system breaks
+ // inodes on copy-up (as it is with overlay with index=off) to maintain the original
+ // link and correct file ownership.
+
+ // The target already exists so remove it before creating the link to the new target.
+ if err := os.Remove(path); err != nil {
+ return err
+ }
+ return os.Link(oldTarget, path)
+ }
+
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
@@ -31,7 +82,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
- return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
@@ -42,31 +93,31 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
UID: uid,
GID: gid,
}
- mappedPair, err := toHost.ToHost(pair)
+ mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
- return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
- if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if cap != nil {
if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
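The new platformChowner above keeps a (device, inode) -> path table so that a file reachable through several hardlinks is chowned once and every later occurrence is re-linked to the first, which also repairs links broken by copy-up on filesystems such as overlay with index=off. A simplified, single-threaded, Unix-only sketch of the same bookkeeping (the "tree" path and target IDs are examples):

    package main

    import (
    	"fmt"
    	"io/fs"
    	"os"
    	"path/filepath"
    	"syscall"
    )

    func main() {
    	seen := map[[2]uint64]string{} // (dev, ino) -> first path chowned

    	err := filepath.WalkDir("tree", func(path string, d fs.DirEntry, err error) error {
    		if err != nil || d.IsDir() {
    			return err
    		}
    		info, err := d.Info()
    		if err != nil {
    			return nil
    		}
    		st, ok := info.Sys().(*syscall.Stat_t)
    		if !ok {
    			return nil
    		}
    		key := [2]uint64{uint64(st.Dev), uint64(st.Ino)}
    		if first, found := seen[key]; found {
    			// Already chowned via another name: relink instead of
    			// chowning again, preserving the hardlink relationship.
    			if err := os.Remove(path); err != nil {
    				return err
    			}
    			return os.Link(first, path)
    		}
    		seen[key] = path
    		return os.Lchown(path, 1000, 1000) // example target IDs
    	})
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }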
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
index 31bd5bb52..6c2bd2ca2 100644
--- a/vendor/github.com/containers/storage/drivers/chown_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package graphdriver
@@ -9,6 +9,12 @@ import (
"github.com/containers/storage/pkg/idtools"
)
-func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+type platformChowner struct{}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{}
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
index c8c4905bf..2aa3e9e6b 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -1,4 +1,4 @@
-// +build linux darwin freebsd solaris
+//go:build !windows
package graphdriver
@@ -12,10 +12,10 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chroot(path); err != nil {
- return fmt.Errorf("error chrooting to %q: %v", path, err)
+ return fmt.Errorf("chrooting to %q: %w", path, err)
}
if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
index f4dc22a96..d5eb5ed98 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -9,7 +9,7 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chdir(path); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index c2156861f..8c0bbed61 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,4 +1,4 @@
-// +build cgo
+//go:build cgo
package copy
@@ -10,12 +10,12 @@ package copy
#endif
*/
import "C"
+
import (
"container/list"
"errors"
"fmt"
"io"
- "net"
"os"
"path/filepath"
"strings"
@@ -26,7 +26,6 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
@@ -41,7 +40,7 @@ const (
)
// CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
srcFile, err := os.Open(srcPath)
if err != nil {
return err
@@ -49,13 +48,13 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
defer srcFile.Close()
if *copyWithFileClone {
- _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
- if err == nil {
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+ if errno == 0 {
return nil
}
*copyWithFileClone = false
- if err == unix.EXDEV {
+ if errno == unix.EXDEV {
*copyWithFileRange = false
}
}
@@ -73,7 +72,7 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
// If the destination file already exists, we shouldn't blow it away
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
if err != nil {
@@ -154,7 +153,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
dstPath := filepath.Join(dstDir, relPath)
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
- return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
@@ -199,14 +198,12 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
case mode&os.ModeSocket != 0:
- s, err := net.Listen("unix", dstPath)
- if err != nil {
+ if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}
- s.Close()
case mode&os.ModeDevice != 0:
- if rsystem.RunningInUserNS() {
+ if unshare.IsRootless() {
// cannot create a device if running in user namespace
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
index e97523c35..baaa86ddc 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -1,6 +1,6 @@
-// +build !linux !cgo
+//go:build !linux || !cgo
-package copy
+package copy //nolint: predeclared
import (
"io"
@@ -24,7 +24,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
}
// CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
f, err := os.Open(srcPath)
if err != nil {
return err
@@ -35,6 +35,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters"
return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
}
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
index 3fc45495b..964dcaf2f 100644
--- a/vendor/github.com/containers/storage/drivers/counter.go
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -53,11 +53,16 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
}
} else if !c.checker.IsMounted(path) {
// if the unmount was performed outside of this process (e.g. conmon cleanup)
- //the ref counter would lose track of it. Check if it is still mounted.
+ // the ref counter would lose track of it. Check if it is still mounted.
m.count = 0
}
infoOp(m)
count := m.count
+ if count <= 0 {
+ // If the mounted path has been decremented enough to have no references,
+ // then its entry can be removed.
+ delete(c.counts, path)
+ }
c.mu.Unlock()
return count
}
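The hunk above makes the ref counter delete a path's entry once its count drops to zero, so long-running processes no longer accumulate dead entries for unmounted paths. A minimal sketch of that counting-map pattern:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type refCounter struct {
    	mu     sync.Mutex
    	counts map[string]int
    }

    func (c *refCounter) incdec(path string, delta int) int {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	c.counts[path] += delta
    	count := c.counts[path]
    	if count <= 0 {
    		// As in the hunk above: drop the entry once nothing
    		// references the mount, so the map cannot grow unboundedly.
    		delete(c.counts, path)
    	}
    	return count
    }

    func main() {
    	c := &refCounter{counts: map[string]int{}}
    	fmt.Println(c.incdec("/mnt/layer", +1)) // 1
    	fmt.Println(c.incdec("/mnt/layer", +1)) // 2
    	fmt.Println(c.incdec("/mnt/layer", -1)) // 1
    	fmt.Println(c.incdec("/mnt/layer", -1)) // 0, entry deleted
    	fmt.Println(len(c.counts))              // 0
    }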
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
deleted file mode 100644
index c23097b76..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// +build linux,cgo
-
-package devmapper
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-type directLVMConfig struct {
- Device string
- ThinpPercent uint64
- ThinpMetaPercent uint64
- AutoExtendPercent uint64
- AutoExtendThreshold uint64
- MetaDataSize string
-}
-
-var (
- errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
- errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
- errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
-)
-
-func validateLVMConfig(cfg directLVMConfig) error {
- if cfg.Device == "" {
- return errMissingSetupDevice
- }
- if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
- return errThinpPercentMissing
- }
-
- if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
- return errThinpPercentTooBig
- }
- return nil
-}
-
-func checkDevAvailable(dev string) error {
- lvmScan, err := exec.LookPath("lvmdiskscan")
- if err != nil {
- logrus.Debug("could not find lvmdiskscan")
- return nil
- }
-
- out, err := exec.Command(lvmScan).CombinedOutput()
- if err != nil {
- logrus.WithError(err).Error(string(out))
- return nil
- }
-
- if !bytes.Contains(out, []byte(dev)) {
- return errors.Errorf("%s is not available for use with devicemapper", dev)
- }
- return nil
-}
-
-func checkDevInVG(dev string) error {
- pvDisplay, err := exec.LookPath("pvdisplay")
- if err != nil {
- logrus.Debug("could not find pvdisplay")
- return nil
- }
-
- out, err := exec.Command(pvDisplay, dev).CombinedOutput()
- if err != nil {
- logrus.WithError(err).Error(string(out))
- return nil
- }
-
- scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
- for scanner.Scan() {
- fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
- if len(fields) > 1 {
- // got "VG Name" line"
- vg := strings.TrimSpace(fields[1])
- if len(vg) > 0 {
- return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
- }
- logrus.Error(fields)
- break
- }
- }
- return nil
-}
-
-func checkDevHasFS(dev string) error {
- blkid, err := exec.LookPath("blkid")
- if err != nil {
- logrus.Debug("could not find blkid")
- return nil
- }
-
- out, err := exec.Command(blkid, dev).CombinedOutput()
- if err != nil {
- logrus.WithError(err).Error(string(out))
- return nil
- }
-
- fields := bytes.Fields(out)
- for _, f := range fields {
- kv := bytes.Split(f, []byte{'='})
- if bytes.Equal(kv[0], []byte("TYPE")) {
- v := bytes.Trim(kv[1], "\"")
- if len(v) > 0 {
- return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
- }
- return nil
- }
- }
- return nil
-}
-
-func verifyBlockDevice(dev string, force bool) error {
- absPath, err := filepath.Abs(dev)
- if err != nil {
- return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
- }
- realPath, err := filepath.EvalSymlinks(absPath)
- if err != nil {
- return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
- }
- if err := checkDevAvailable(absPath); err != nil {
- logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
- if err := checkDevAvailable(realPath); err != nil {
- return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath)
- }
- }
- if err := checkDevInVG(realPath); err != nil {
- return err
- }
-
- if force {
- return nil
- }
-
- if err := checkDevHasFS(realPath); err != nil {
- return err
- }
- return nil
-}
-
-func readLVMConfig(root string) (directLVMConfig, error) {
- var cfg directLVMConfig
-
- p := filepath.Join(root, "setup-config.json")
- b, err := ioutil.ReadFile(p)
- if err != nil {
- if os.IsNotExist(err) {
- return cfg, nil
- }
- return cfg, errors.Wrap(err, "error reading existing setup config")
- }
-
- // check if this is just an empty file, no need to produce a json error later if so
- if len(b) == 0 {
- return cfg, nil
- }
-
- err = json.Unmarshal(b, &cfg)
- return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
-}
-
-func writeLVMConfig(root string, cfg directLVMConfig) error {
- p := filepath.Join(root, "setup-config.json")
- b, err := json.Marshal(cfg)
- if err != nil {
- return errors.Wrap(err, "error marshalling direct lvm config")
- }
- err = ioutil.WriteFile(p, b, 0600)
- return errors.Wrap(err, "error writing direct lvm config to file")
-}
-
-func setupDirectLVM(cfg directLVMConfig) error {
- lvmProfileDir := "/etc/lvm/profile"
- binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
-
- for _, bin := range binaries {
- if _, err := exec.LookPath(bin); err != nil {
- return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
- }
- }
-
- err := os.MkdirAll(lvmProfileDir, 0755)
- if err != nil {
- return errors.Wrap(err, "error creating lvm profile directory")
- }
-
- if cfg.AutoExtendPercent == 0 {
- cfg.AutoExtendPercent = 20
- }
-
- if cfg.AutoExtendThreshold == 0 {
- cfg.AutoExtendThreshold = 80
- }
-
- if cfg.ThinpPercent == 0 {
- cfg.ThinpPercent = 95
- }
- if cfg.ThinpMetaPercent == 0 {
- cfg.ThinpMetaPercent = 1
- }
- if cfg.MetaDataSize == "" {
- cfg.MetaDataSize = "128k"
- }
-
- out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
- if err != nil {
- return errors.Wrap(err, string(out))
- }
-
- out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
- if err != nil {
- return errors.Wrap(err, string(out))
- }
-
- out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
- if err != nil {
- return errors.Wrap(err, string(out))
- }
- out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
- if err != nil {
- return errors.Wrap(err, string(out))
- }
-
- out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
- if err != nil {
- return errors.Wrap(err, string(out))
- }
-
- profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
- err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
- if err != nil {
- return errors.Wrap(err, "error writing storage thinp autoextend profile")
- }
-
- out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
- return errors.Wrap(err, string(out))
-}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
deleted file mode 100644
index 19fb3fda9..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ /dev/null
@@ -1,2869 +0,0 @@
-// +build linux,cgo
-
-package devmapper
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "time"
-
- graphdriver "github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/devicemapper"
- "github.com/containers/storage/pkg/dmesg"
- "github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/loopback"
- "github.com/containers/storage/pkg/mount"
- "github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/parsers/kernel"
- units "github.com/docker/go-units"
- "github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
-)
-
-var (
- defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
- defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
- defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
- defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
- defaultUdevSyncOverride = false
- maxDeviceID = 0xffffff // 24 bit, pool limit
- deviceIDMapSz = (maxDeviceID + 1) / 8
- driverDeferredRemovalSupport = false
- enableDeferredRemoval = false
- enableDeferredDeletion = false
- userBaseSize = false
- defaultMinFreeSpacePercent uint32 = 10
- lvmSetupConfigForce bool
-)
-
-const (
- deviceSetMetaFile = "deviceset-metadata"
- transactionMetaFile = "transaction-metadata"
- xfs = "xfs"
- ext4 = "ext4"
- base = "base"
-)
-
-type transaction struct {
- OpenTransactionID uint64 `json:"open_transaction_id"`
- DeviceIDHash string `json:"device_hash"`
- DeviceID int `json:"device_id"`
-}
-
-type devInfo struct {
- Hash string `json:"-"`
- DeviceID int `json:"device_id"`
- Size uint64 `json:"size"`
- TransactionID uint64 `json:"transaction_id"`
- Initialized bool `json:"initialized"`
- Deleted bool `json:"deleted"`
- devices *DeviceSet
-
- // The global DeviceSet lock guarantees that we serialize all
- // the calls to libdevmapper (which is not threadsafe), but we
- // sometimes release that lock while sleeping. In that case
- // this per-device lock is still held, protecting against
- // other accesses to the device that we're doing the wait on.
- //
- // WARNING: In order to avoid AB-BA deadlocks when releasing
- // the global lock while holding the per-device locks all
- // device locks must be acquired *before* the device lock, and
- // multiple device locks should be acquired parent before child.
- lock sync.Mutex
-}
-
-type metaData struct {
- Devices map[string]*devInfo `json:"Devices"`
-}
-
-// DeviceSet holds information about list of devices
-type DeviceSet struct {
- metaData `json:"-"`
- sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper
- root string
- devicePrefix string
- TransactionID uint64 `json:"-"`
- NextDeviceID int `json:"next_device_id"`
- deviceIDMap []byte
-
- // Options
- dataLoopbackSize int64
- metaDataSize string
- metaDataLoopbackSize int64
- baseFsSize uint64
- filesystem string
- mountOptions string
- mkfsArgs []string
- dataDevice string // block or loop dev
- dataLoopFile string // loopback file, if used
- metadataDevice string // block or loop dev
- metadataLoopFile string // loopback file, if used
- doBlkDiscard bool
- thinpBlockSize uint32
- thinPoolDevice string
- transaction `json:"-"`
- overrideUdevSyncCheck bool
- deferredRemove bool // use deferred removal
- deferredDelete bool // use deferred deletion
- BaseDeviceUUID string // save UUID of base device
- BaseDeviceFilesystem string // save filesystem of base device
- nrDeletedDevices uint // number of deleted devices
- deletionWorkerTicker *time.Ticker
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
- minFreeSpacePercent uint32 //min free space percentage in thinpool
- xfsNospaceRetries string // max retries when xfs receives ENOSPC
- lvmSetupConfig directLVMConfig
-}
-
-// DiskUsage contains information about disk usage and is used when reporting Status of a device.
-type DiskUsage struct {
- // Used bytes on the disk.
- Used uint64
- // Total bytes on the disk.
- Total uint64
- // Available bytes on the disk.
- Available uint64
-}
-
-// Status returns the information about the device.
-type Status struct {
- // PoolName is the name of the data pool.
- PoolName string
- // DataFile is the actual block device for data.
- DataFile string
- // DataLoopback loopback file, if used.
- DataLoopback string
- // MetadataFile is the actual block device for metadata.
- MetadataFile string
- // MetadataLoopback is the loopback file, if used.
- MetadataLoopback string
- // Data is the disk used for data.
- Data DiskUsage
- // Metadata is the disk used for meta data.
- Metadata DiskUsage
- // BaseDeviceSize is the base size of containers and images.
- BaseDeviceSize uint64
- // BaseDeviceFS is the backing filesystem.
- BaseDeviceFS string
- // SectorSize is the sector size used by the device.
- SectorSize uint64
- // UdevSyncSupported is true if udev sync is supported.
- UdevSyncSupported bool
- // DeferredRemoveEnabled is true if deferred removal is enabled;
- // devices are then deactivated lazily instead of being removed
- // immediately.
- DeferredRemoveEnabled bool
- // DeferredDeleteEnabled is true if deferred deletion is enabled. This
- // is different from deferred removal: "removal" means the device-mapper
- // device is deactivated while the thin device stays in the thin pool
- // and can be activated again, whereas "deletion" means the thin device
- // is deleted from the thin pool and cannot be activated again.
- DeferredDeleteEnabled bool
- DeferredDeletedDeviceCount uint
- MinFreeSpace uint64
-}
-
-// deviceMetadata is used to export image/container metadata in inspect.
-type deviceMetadata struct {
- deviceID int
- deviceSize uint64 // size in bytes
- deviceName string // Device name as used during activation
-}
-
-// DevStatus contains information about a mounted device: its ID, size, and sector usage.
-type DevStatus struct {
- // DeviceID is the ID of the device.
- DeviceID int
- // Size is the size of the filesystem.
- Size uint64
- // TransactionID is a unique integer per device set, used to identify
- // an operation on the filesystem; this number is incremented with
- // each operation.
- TransactionID uint64
- // SizeInSectors indicates the size of the device in sectors.
- SizeInSectors uint64
- // MappedSectors indicates the number of mapped sectors.
- MappedSectors uint64
- // HighestMappedSector is the highest mapped sector of the device.
- HighestMappedSector uint64
-}
-
-func getDevName(name string) string {
- return "/dev/mapper/" + name
-}
-
-func (info *devInfo) Name() string {
- hash := info.Hash
- if hash == "" {
- hash = base
- }
- return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
-}
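-
-// Example of the naming scheme (hypothetical prefix and hash): with
-// devicePrefix "container-8:1-123456", the base device is named
-// "container-8:1-123456-base" and a layer with hash "abc" is named
-// "container-8:1-123456-abc"; DevName below prepends "/dev/mapper/":
-//
-//	info.DevName() // "/dev/mapper/container-8:1-123456-abc"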
-
-func (info *devInfo) DevName() string {
- return getDevName(info.Name())
-}
-
-func (devices *DeviceSet) loopbackDir() string {
- return path.Join(devices.root, "devicemapper")
-}
-
-func (devices *DeviceSet) metadataDir() string {
- return path.Join(devices.root, "metadata")
-}
-
-func (devices *DeviceSet) metadataFile(info *devInfo) string {
- file := info.Hash
- if file == "" {
- file = base
- }
- return path.Join(devices.metadataDir(), file)
-}
-
-func (devices *DeviceSet) transactionMetaFile() string {
- return path.Join(devices.metadataDir(), transactionMetaFile)
-}
-
-func (devices *DeviceSet) deviceSetMetaFile() string {
- return path.Join(devices.metadataDir(), deviceSetMetaFile)
-}
-
-func (devices *DeviceSet) oldMetadataFile() string {
- return path.Join(devices.loopbackDir(), "json")
-}
-
-func (devices *DeviceSet) getPoolName() string {
- if devices.thinPoolDevice == "" {
- return devices.devicePrefix + "-pool"
- }
- return devices.thinPoolDevice
-}
-
-func (devices *DeviceSet) getPoolDevName() string {
- return getDevName(devices.getPoolName())
-}
-
-func (devices *DeviceSet) hasImage(name string) bool {
- dirname := devices.loopbackDir()
- filename := path.Join(dirname, name)
-
- _, err := os.Stat(filename)
- return err == nil
-}
-
-// ensureImage creates a sparse file of <size> bytes at the path
-// <root>/devicemapper/<name>.
-// If the file already exists and the new size is larger than its current
-// size, it grows to the new size. Either way it returns the full path.
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
- dirname := devices.loopbackDir()
- filename := path.Join(dirname, name)
-
- uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
- if err != nil {
- return "", err
- }
- if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil {
- return "", err
- }
-
- if fi, err := os.Stat(filename); err != nil {
- if !os.IsNotExist(err) {
- return "", err
- }
- logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename)
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
- if err != nil {
- return "", err
- }
- defer file.Close()
-
- if err := file.Truncate(size); err != nil {
- return "", err
- }
- } else {
- if fi.Size() < size {
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
- if err != nil {
- return "", err
- }
- defer file.Close()
- if err := file.Truncate(size); err != nil {
- return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err)
- }
- } else if fi.Size() > size {
- logrus.Warnf("devmapper: Can't shrink loopback file %s", filename)
- }
- }
- return filename, nil
-}
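-
-// Note on the Truncate calls above: extending a file with Truncate allocates
-// no data blocks, so the loopback images are sparse. A standalone sketch of
-// the same pattern (hypothetical path and size, not code from this file):
-//
-//	f, _ := os.OpenFile("/tmp/img", os.O_RDWR|os.O_CREATE, 0600)
-//	_ = f.Truncate(100 << 30) // 100 GiB apparent size, ~zero bytes on disk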
-
-func (devices *DeviceSet) allocateTransactionID() uint64 {
- devices.OpenTransactionID = devices.TransactionID + 1
- return devices.OpenTransactionID
-}
-
-func (devices *DeviceSet) updatePoolTransactionID() error {
- if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil {
- return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err)
- }
- devices.TransactionID = devices.OpenTransactionID
- return nil
-}
-
-func (devices *DeviceSet) removeMetadata(info *devInfo) error {
- if err := os.RemoveAll(devices.metadataFile(info)); err != nil {
- return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err)
- }
- return nil
-}
-
-// Given JSON data and a file path, write it to disk.
-func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
- tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
- if err != nil {
- return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
- }
-
- n, err := tmpFile.Write(jsonData)
- if err != nil {
- return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err)
- }
- if n < len(jsonData) {
- return io.ErrShortWrite
- }
- if err := tmpFile.Sync(); err != nil {
- return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err)
- }
- if err := tmpFile.Close(); err != nil {
- return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err)
- }
- if err := os.Rename(tmpFile.Name(), filePath); err != nil {
- return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err)
- }
-
- return nil
-}
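-
-// writeMetaFile follows the usual crash-safe update pattern: write to a temp
-// file in the same directory, sync, close, then rename over the target, so a
-// reader sees either the old or the new contents but never a torn write.
-// saveMetadata below is a typical caller; sketched (illustrative):
-//
-//	jsonData, _ := json.Marshal(info)
-//	_ = devices.writeMetaFile(jsonData, devices.metadataFile(info))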
-
-func (devices *DeviceSet) saveMetadata(info *devInfo) error {
- jsonData, err := json.Marshal(info)
- if err != nil {
- return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
- }
- if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil {
- return err
- }
- return nil
-}
-
-func (devices *DeviceSet) markDeviceIDUsed(deviceID int) {
- var mask byte
- i := deviceID % 8
- mask = 1 << uint(i)
- devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask
-}
-
-func (devices *DeviceSet) markDeviceIDFree(deviceID int) {
- var mask byte
- i := deviceID % 8
- mask = ^(1 << uint(i))
- devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask
-}
-
-func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool {
- var mask byte
- i := deviceID % 8
- mask = (1 << uint(i))
- return (devices.deviceIDMap[deviceID/8] & mask) == 0
-}
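-
-// Worked example of the bitmap arithmetic above (illustrative values, not
-// code from this file): device ID 11 lives in byte 11/8 = 1 at bit 11%8 = 3.
-//
-//	var bitmap [2]byte
-//	bitmap[11/8] |= 1 << uint(11%8)           // mark used: bitmap[1] == 0b1000
-//	used := bitmap[11/8]&(1<<uint(11%8)) != 0 // true
-//	bitmap[11/8] &= ^byte(1 << uint(11%8))    // mark free again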
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) {
- info := devices.Devices[hash]
- if info == nil {
- info = devices.loadMetadata(hash)
- if info == nil {
- return nil, fmt.Errorf("devmapper: Unknown device %s", hash)
- }
-
- devices.Devices[hash] = info
- }
- return info, nil
-}
-
-func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) {
- devices.Lock()
- defer devices.Unlock()
- info, err := devices.lookupDevice(hash)
- return info, err
-}
-
-// This function relies on the device hash map having been loaded in advance.
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) constructDeviceIDMap() {
- logrus.Debug("devmapper: constructDeviceIDMap()")
- defer logrus.Debug("devmapper: constructDeviceIDMap() END")
-
- for _, info := range devices.Devices {
- devices.markDeviceIDUsed(info.DeviceID)
- logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID)
- }
-}
-
-func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error {
- // Skip some of the meta files which are not device files.
- if strings.HasSuffix(finfo.Name(), ".migrated") {
- logrus.Debugf("devmapper: Skipping file %s", path)
- return nil
- }
-
- if strings.HasPrefix(finfo.Name(), ".") {
- logrus.Debugf("devmapper: Skipping file %s", path)
- return nil
- }
-
- if finfo.Name() == deviceSetMetaFile {
- logrus.Debugf("devmapper: Skipping file %s", path)
- return nil
- }
-
- if finfo.Name() == transactionMetaFile {
- logrus.Debugf("devmapper: Skipping file %s", path)
- return nil
- }
-
- logrus.Debugf("devmapper: Loading data for file %s", path)
-
- hash := finfo.Name()
- if hash == base {
- hash = ""
- }
-
- // Include deleted devices as well; the cleanup logic will go through
- // them and retry deletion of any devices still marked as deleted.
- if _, err := devices.lookupDevice(hash); err != nil {
- return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err)
- }
-
- return nil
-}
-
-func (devices *DeviceSet) loadDeviceFilesOnStart() error {
- logrus.Debug("devmapper: loadDeviceFilesOnStart()")
- defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
-
- var scan = func(path string, info os.FileInfo, err error) error {
- if err != nil {
- logrus.Debugf("devmapper: Can't walk the file %s", path)
- return nil
- }
-
- // Skip any directories
- if info.IsDir() {
- return nil
- }
-
- return devices.deviceFileWalkFunction(path, info)
- }
-
- return filepath.Walk(devices.metadataDir(), scan)
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) unregisterDevice(hash string) error {
- logrus.Debugf("devmapper: unregisterDevice(%v)", hash)
- info := &devInfo{
- Hash: hash,
- }
-
- delete(devices.Devices, hash)
-
- if err := devices.removeMetadata(info); err != nil {
- logrus.Debugf("devmapper: Error removing metadata: %s", err)
- return err
- }
-
- return nil
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) {
- logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash)
- info := &devInfo{
- Hash: hash,
- DeviceID: id,
- Size: size,
- TransactionID: transactionID,
- Initialized: false,
- devices: devices,
- }
-
- devices.Devices[hash] = info
-
- if err := devices.saveMetadata(info); err != nil {
- // Try to remove unused device
- delete(devices.Devices, hash)
- return nil, err
- }
-
- return info, nil
-}
-
-func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error {
- logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash)
-
- if info.Deleted && !ignoreDeleted {
- return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash)
- }
-
- // Make sure deferred removal on device is canceled, if one was
- // scheduled.
- if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
- return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
- }
-
- if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
- return nil
- }
-
- return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
-}
-
-// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
-func xfsSupported() error {
- // Make sure mkfs.xfs is available
- if _, err := exec.LookPath("mkfs.xfs"); err != nil {
- return err // error text is descriptive enough
- }
-
- // Check if kernel supports xfs filesystem or not.
- exec.Command("modprobe", xfs).Run()
-
- f, err := os.Open("/proc/filesystems")
- if err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
- }
- defer f.Close()
-
- s := bufio.NewScanner(f)
- for s.Scan() {
- if strings.HasSuffix(s.Text(), "\txfs") {
- return nil
- }
- }
-
- if err := s.Err(); err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
- }
-
- return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
-}
-
-func determineDefaultFS() string {
- err := xfsSupported()
- if err == nil {
- return xfs
- }
-
- logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", ext4, err)
- return ext4
-}
-
-// mkfsOptions tries to figure out whether some additional mkfs options are required
-func mkfsOptions(fs string) []string {
- if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) {
- // For kernels earlier than 3.16 (and newer xfsutils),
- // some xfs features need to be explicitly disabled.
- return []string{"-m", "crc=0,finobt=0"}
- }
-
- return []string{}
-}
-
-func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
- devname := info.DevName()
-
- if devices.filesystem == "" {
- devices.filesystem = determineDefaultFS()
- }
- if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil {
- return err
- }
-
- args := mkfsOptions(devices.filesystem)
- args = append(args, devices.mkfsArgs...)
- args = append(args, devname)
-
- logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args)
- defer func() {
- if err != nil {
- logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
- } else {
- logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name())
- }
- }()
-
- switch devices.filesystem {
- case xfs:
- err = exec.Command("mkfs.xfs", args...).Run()
- case ext4:
- err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run()
- if err != nil {
- err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run()
- }
- if err != nil {
- return err
- }
- err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run()
- default:
- err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem)
- }
- return
-}
-
-func (devices *DeviceSet) migrateOldMetaData() error {
- // Migrate old metadata file
- jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- if jsonData != nil {
- m := metaData{Devices: make(map[string]*devInfo)}
-
- if err := json.Unmarshal(jsonData, &m); err != nil {
- return err
- }
-
- for hash, info := range m.Devices {
- info.Hash = hash
- devices.saveMetadata(info)
- }
- if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil {
- return err
- }
-
- }
-
- return nil
-}
-
-// Clean up deleted devices. It assumes that all the devices have been
-// loaded in the hash table.
-func (devices *DeviceSet) cleanupDeletedDevices() error {
- devices.Lock()
-
- // If there are no deleted devices, there is nothing to do.
- if devices.nrDeletedDevices == 0 {
- devices.Unlock()
- return nil
- }
-
- var deletedDevices []*devInfo
-
- for _, info := range devices.Devices {
- if !info.Deleted {
- continue
- }
- logrus.Debugf("devmapper: Found deleted device %s.", info.Hash)
- deletedDevices = append(deletedDevices, info)
- }
-
- // Delete the deleted devices. DeleteDevice() first takes the info lock
- // and then devices.Lock(). So drop it to avoid deadlock.
- devices.Unlock()
-
- for _, info := range deletedDevices {
- // This will again try deferred deletion.
- if err := devices.DeleteDevice(info.Hash, false); err != nil {
- logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err)
- }
- }
-
- return nil
-}
-
-func (devices *DeviceSet) countDeletedDevices() {
- for _, info := range devices.Devices {
- if !info.Deleted {
- continue
- }
- devices.nrDeletedDevices++
- }
-}
-
-func (devices *DeviceSet) startDeviceDeletionWorker() {
- // Deferred deletion is not enabled. Don't do anything.
- if !devices.deferredDelete {
- return
- }
-
- // Clean up right away if there are any leaked devices. Note this
- // could cause some slowdown during process startup, if there were
- // leaked devices.
- devices.cleanupDeletedDevices()
- logrus.Debug("devmapper: Worker to cleanup deleted devices started")
- for range devices.deletionWorkerTicker.C {
- devices.cleanupDeletedDevices()
- }
-}
-
-func (devices *DeviceSet) initMetaData() error {
- devices.Lock()
- defer devices.Unlock()
-
- if err := devices.migrateOldMetaData(); err != nil {
- return err
- }
-
- _, transactionID, _, _, _, _, err := devices.poolStatus()
- if err != nil {
- return err
- }
-
- devices.TransactionID = transactionID
-
- if err := devices.loadDeviceFilesOnStart(); err != nil {
- return fmt.Errorf("devmapper: Failed to load device files:%v", err)
- }
-
- devices.constructDeviceIDMap()
- devices.countDeletedDevices()
-
- if err := devices.processPendingTransaction(); err != nil {
- return err
- }
-
- // Start a goroutine to cleanup Deleted Devices
- go devices.startDeviceDeletionWorker()
- return nil
-}
-
-func (devices *DeviceSet) incNextDeviceID() {
- // IDs are 24-bit, so wrap around
- devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID
-}
-
-func (devices *DeviceSet) getNextFreeDeviceID() (int, error) {
- devices.incNextDeviceID()
- for i := 0; i <= maxDeviceID; i++ {
- if devices.isDeviceIDFree(devices.NextDeviceID) {
- devices.markDeviceIDUsed(devices.NextDeviceID)
- return devices.NextDeviceID, nil
- }
- devices.incNextDeviceID()
- }
-
- return 0, fmt.Errorf("devmapper: Unable to find a free device ID")
-}
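-
-// The AND with maxDeviceID in incNextDeviceID bounds the search space to the
-// 24-bit thin-pool ID range; e.g. (illustrative, assuming maxDeviceID is
-// 0xffffff):
-//
-//	next := (0xffffff + 1) & 0xffffff // wraps to 0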
-
-func (devices *DeviceSet) poolHasFreeSpace() error {
- if devices.minFreeSpacePercent == 0 {
- return nil
- }
-
- _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
- if err != nil {
- return err
- }
-
- minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
- if minFreeData < 1 {
- minFreeData = 1
- }
- dataFree := dataTotal - dataUsed
- if dataFree < minFreeData {
- return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData)
- }
-
- minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100
- if minFreeMetadata < 1 {
- minFreeMetadata = 1
- }
-
- metadataFree := metadataTotal - metadataUsed
- if metadataFree < minFreeMetadata {
- return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata)
- }
-
- return nil
-}
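-
-// Worked example for the threshold above (illustrative numbers): with
-// dataTotal = 1000 blocks and minFreeSpacePercent = 10,
-//
-//	minFreeData := (1000 * 10) / 100 // 100 blocks must stay free
-//
-// so the pool is considered full once fewer than 100 data blocks remain; the
-// same rule is applied to metadata blocks.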
-
-func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
- devices.Lock()
- defer devices.Unlock()
-
- deviceID, err := devices.getNextFreeDeviceID()
- if err != nil {
- return nil, err
- }
-
- if err := devices.openTransaction(hash, deviceID); err != nil {
- logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID)
- devices.markDeviceIDFree(deviceID)
- return nil, err
- }
-
- for {
- if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil {
- if devicemapper.DeviceIDExists(err) {
- // Device ID already exists. This should not
- // happen, since we now have a mechanism to
- // find a free device ID, so something is not
- // right. Log an error and retry with a fresh ID.
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID)
- deviceID, err = devices.getNextFreeDeviceID()
- if err != nil {
- return nil, err
- }
- // Save new device id into transaction
- devices.refreshTransaction(deviceID)
- continue
- }
- logrus.Debugf("devmapper: Error creating device: %s", err)
- devices.markDeviceIDFree(deviceID)
- return nil, err
- }
- break
- }
-
- logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize)
- info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID)
- if err != nil {
- _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
- devices.markDeviceIDFree(deviceID)
- return nil, err
- }
-
- if err := devices.closeTransaction(); err != nil {
- devices.unregisterDevice(hash)
- devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
- devices.markDeviceIDFree(deviceID)
- return nil, err
- }
- return info, nil
-}
-
-func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error {
- var (
- devinfo *devicemapper.Info
- err error
- )
-
- if err = devices.poolHasFreeSpace(); err != nil {
- return err
- }
-
- if devices.deferredRemove {
- devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name())
- if err != nil {
- return err
- }
- if devinfo != nil && devinfo.DeferredRemove != 0 {
- err = devices.cancelDeferredRemoval(baseInfo)
- if err != nil {
- // If the error is ErrEnxio, the device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
- return err
- }
- devinfo = nil
- } else {
- defer devices.deactivateDevice(baseInfo)
- }
- }
- } else {
- devinfo, err = devicemapper.GetInfo(baseInfo.Name())
- if err != nil {
- return err
- }
- }
-
- doSuspend := devinfo != nil && devinfo.Exists != 0
-
- if doSuspend {
- if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil {
- return err
- }
- defer devicemapper.ResumeDevice(baseInfo.Name())
- }
-
- if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil {
- return err
- }
-
- return nil
-}
-
-func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error {
- deviceID, err := devices.getNextFreeDeviceID()
- if err != nil {
- return err
- }
-
- if err := devices.openTransaction(hash, deviceID); err != nil {
- logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID)
- devices.markDeviceIDFree(deviceID)
- return err
- }
-
- for {
- if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil {
- if devicemapper.DeviceIDExists(err) {
- // Device ID already exists. This should not
- // happen, since we now have a mechanism to
- // find a free device ID, so something is not
- // right. Log an error and retry with a fresh ID.
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID)
- deviceID, err = devices.getNextFreeDeviceID()
- if err != nil {
- return err
- }
- // Save new device id into transaction
- devices.refreshTransaction(deviceID)
- continue
- }
- logrus.Debugf("devmapper: Error creating snap device: %s", err)
- devices.markDeviceIDFree(deviceID)
- return err
- }
- break
- }
-
- if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil {
- devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
- devices.markDeviceIDFree(deviceID)
- logrus.Debugf("devmapper: Error registering device: %s", err)
- return err
- }
-
- if err := devices.closeTransaction(); err != nil {
- devices.unregisterDevice(hash)
- devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
- devices.markDeviceIDFree(deviceID)
- return err
- }
- return nil
-}
-
-func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
- info := &devInfo{Hash: hash, devices: devices}
-
- jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
- if err != nil {
- logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
- return nil
- }
-
- if err := json.Unmarshal(jsonData, &info); err != nil {
- logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err)
- return nil
- }
-
- if info.DeviceID > maxDeviceID {
- logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID)
- return nil
- }
-
- return info
-}
-
-func getDeviceUUID(device string) (string, error) {
- out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output()
- if err != nil {
- return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err)
- }
-
- uuid := strings.TrimSuffix(string(out), "\n")
- uuid = strings.TrimSpace(uuid)
- logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid)
- return uuid, nil
-}
-
-func (devices *DeviceSet) getBaseDeviceSize() uint64 {
- info, _ := devices.lookupDevice("")
- if info == nil {
- return 0
- }
- return info.Size
-}
-
-func (devices *DeviceSet) getBaseDeviceFS() string {
- return devices.BaseDeviceFilesystem
-}
-
-func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error {
- devices.Lock()
- defer devices.Unlock()
-
- if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
- return err
- }
- defer devices.deactivateDevice(baseInfo)
-
- uuid, err := getDeviceUUID(baseInfo.DevName())
- if err != nil {
- return err
- }
-
- if devices.BaseDeviceUUID != uuid {
- return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID)
- }
-
- if devices.BaseDeviceFilesystem == "" {
- fsType, err := ProbeFsType(baseInfo.DevName())
- if err != nil {
- return err
- }
- if err := devices.saveBaseDeviceFilesystem(fsType); err != nil {
- return err
- }
- }
-
- // If the user specified a filesystem using the dm.fs option and the
- // current filesystem of the base image is not the same, warn the
- // user that dm.fs will be ignored.
- if devices.BaseDeviceFilesystem != devices.filesystem {
- logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
- devices.filesystem = devices.BaseDeviceFilesystem
- }
- return nil
-}
-
-func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
- devices.BaseDeviceFilesystem = fs
- return devices.saveDeviceSetMetaData()
-}
-
-func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
- devices.Lock()
- defer devices.Unlock()
-
- if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
- return err
- }
- defer devices.deactivateDevice(baseInfo)
-
- uuid, err := getDeviceUUID(baseInfo.DevName())
- if err != nil {
- return err
- }
-
- devices.BaseDeviceUUID = uuid
- return devices.saveDeviceSetMetaData()
-}
-
-func (devices *DeviceSet) createBaseImage() error {
- logrus.Debug("devmapper: Initializing base device-mapper thin volume")
-
- // Create initial device
- info, err := devices.createRegisterDevice("")
- if err != nil {
- return err
- }
-
- logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
-
- if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return err
- }
-
- if err := devices.createFilesystem(info); err != nil {
- return err
- }
-
- info.Initialized = true
- if err := devices.saveMetadata(info); err != nil {
- info.Initialized = false
- return err
- }
-
- if err := devices.saveBaseDeviceUUID(info); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
- }
-
- return nil
-}
-
-// Returns whether the thin pool device exists. If the device exists, it also
-// makes sure it is a thin-pool device and not some other type of device.
-func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
- logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)
-
- info, err := devicemapper.GetInfo(thinPoolDevice)
- if err != nil {
- return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
- }
-
- // Device does not exist.
- if info.Exists == 0 {
- return false, nil
- }
-
- _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
- if err != nil {
- return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
- }
-
- if deviceType != "thin-pool" {
- return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
- }
-
- return true, nil
-}
-
-func (devices *DeviceSet) checkThinPool() error {
- _, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
- if err != nil {
- return err
- }
- if dataUsed != 0 {
- return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
- devices.thinPoolDevice)
- }
- if transactionID != 0 {
- return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
- devices.thinPoolDevice)
- }
- return nil
-}
-
-// The base image is initialized properly. Either save the UUID for the
-// first time (the upgrade case) or verify the UUID.
-func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
- // If BaseDeviceUUID is empty (the upgrade case), save it and return success.
- if devices.BaseDeviceUUID == "" {
- if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
- }
- return nil
- }
-
- if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err)
- }
-
- return nil
-}
-
-func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
- if !userBaseSize {
- return nil
- }
-
- if devices.baseFsSize < devices.getBaseDeviceSize() {
- return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize())))
- }
-
- if devices.baseFsSize == devices.getBaseDeviceSize() {
- return nil
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- info.Size = devices.baseFsSize
-
- if err := devices.saveMetadata(info); err != nil {
- // Try to remove unused device
- delete(devices.Devices, info.Hash)
- return err
- }
-
- return devices.growFS(info)
-}
-
-func (devices *DeviceSet) growFS(info *devInfo) error {
- if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return fmt.Errorf("Error activating devmapper device: %s", err)
- }
-
- defer devices.deactivateDevice(info)
-
- fsMountPoint := "/run/containers/storage/mnt"
- if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
- if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
- return err
- }
- defer os.RemoveAll(fsMountPoint)
- }
-
- options := ""
- if devices.BaseDeviceFilesystem == xfs {
- // XFS needs nouuid or it can't mount filesystems with the same filesystem UUID.
- options = joinMountOptions(options, "nouuid")
- }
- options = joinMountOptions(options, devices.mountOptions)
-
- if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
- }
-
- defer func() {
- if err := mount.Unmount(fsMountPoint); err != nil {
- logrus.Warnf("devmapper.growFS cleanup error: %v", err)
- }
- }()
-
- switch devices.BaseDeviceFilesystem {
- case ext4:
- if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
- }
- case xfs:
- if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
- }
- default:
- return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
- }
- return nil
-}
-
-func (devices *DeviceSet) setupBaseImage() error {
- oldInfo, _ := devices.lookupDeviceWithLock("")
-
- // The base image already exists. If it is initialized properly, do
- // UUID verification and return. Otherwise remove the image and set
- // it up fresh.
-
- if oldInfo != nil {
- if oldInfo.Initialized && !oldInfo.Deleted {
- if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil {
- return err
- }
-
- if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil {
- return err
- }
-
- return nil
- }
-
- logrus.Debug("devmapper: Removing uninitialized base image")
- // If previous base device is in deferred delete state,
- // that needs to be cleaned up first. So don't try
- // deferred deletion.
- if err := devices.DeleteDevice("", true); err != nil {
- return err
- }
- }
-
- // If we are setting up base image for the first time, make sure
- // thin pool is empty.
- if devices.thinPoolDevice != "" && oldInfo == nil {
- if err := devices.checkThinPool(); err != nil {
- return err
- }
- }
-
- // Create new base image device
- if err := devices.createBaseImage(); err != nil {
- return err
- }
-
- return nil
-}
-
-func setCloseOnExec(name string) {
- fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
- for _, i := range fileInfos {
- link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
- if link == name {
- fd, err := strconv.Atoi(i.Name())
- if err == nil {
- unix.CloseOnExec(fd)
- }
- }
- }
-}
-
-func major(device uint64) uint64 {
- return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
- return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
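-
-// Worked example of the dev_t split above (illustrative): device-mapper nodes
-// commonly use major 253, so for the encoded device number 0xfd03,
-//
-//	mjr := (uint64(0xfd03) >> 8) & 0xfff                                // 253
-//	mnr := (uint64(0xfd03) & 0xff) | ((uint64(0xfd03) >> 12) & 0xfff00) // 3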
-
-// ResizePool increases the size of the pool.
-func (devices *DeviceSet) ResizePool(size int64) error {
- dirname := devices.loopbackDir()
- datafilename := path.Join(dirname, "data")
- if len(devices.dataDevice) > 0 {
- datafilename = devices.dataDevice
- }
- metadatafilename := path.Join(dirname, "metadata")
- if len(devices.metadataDevice) > 0 {
- metadatafilename = devices.metadataDevice
- }
-
- datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
- if err != nil {
- return err
- }
- defer datafile.Close()
-
- fi, err := datafile.Stat()
- if err != nil {
- return err
- }
-
- if fi.Size() > size {
- return fmt.Errorf("devmapper: Can't shrink file")
- }
-
- dataloopback := loopback.FindLoopDeviceFor(datafile)
- if dataloopback == nil {
- return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename)
- }
- defer dataloopback.Close()
-
- metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
- if err != nil {
- return err
- }
- defer metadatafile.Close()
-
- metadataloopback := loopback.FindLoopDeviceFor(metadatafile)
- if metadataloopback == nil {
- return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename)
- }
- defer metadataloopback.Close()
-
- // Grow loopback file
- if err := datafile.Truncate(size); err != nil {
- return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err)
- }
-
- // Reload size for loopback device
- if err := loopback.SetCapacity(dataloopback); err != nil {
- return fmt.Errorf("Unable to update loopback capacity: %s", err)
- }
-
- // Suspend the pool
- if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil {
- return fmt.Errorf("devmapper: Unable to suspend pool: %s", err)
- }
-
- // Reload with the new block sizes
- if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
- return fmt.Errorf("devmapper: Unable to reload pool: %s", err)
- }
-
- // Resume the pool
- if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil {
- return fmt.Errorf("devmapper: Unable to resume pool: %s", err)
- }
-
- return nil
-}
-
-func (devices *DeviceSet) loadTransactionMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
- if err != nil {
- // There is no active transaction. This will be the case
- // during upgrade.
- if os.IsNotExist(err) {
- devices.OpenTransactionID = devices.TransactionID
- return nil
- }
- return err
- }
-
- return json.Unmarshal(jsonData, &devices.transaction)
-}
-
-func (devices *DeviceSet) saveTransactionMetaData() error {
- jsonData, err := json.Marshal(&devices.transaction)
- if err != nil {
- return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
- }
-
- return devices.writeMetaFile(jsonData, devices.transactionMetaFile())
-}
-
-func (devices *DeviceSet) removeTransactionMetaData() error {
- return os.RemoveAll(devices.transactionMetaFile())
-}
-
-func (devices *DeviceSet) rollbackTransaction() error {
- logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID)
-
- // A device ID might already have been deleted before the transaction
- // was closed. In that case this call will fail; just log a message
- // on failure.
- if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil {
- logrus.Errorf("devmapper: Unable to delete device: %s", err)
- }
-
- dinfo := &devInfo{Hash: devices.DeviceIDHash}
- if err := devices.removeMetadata(dinfo); err != nil {
- logrus.Errorf("devmapper: Unable to remove metadata: %s", err)
- } else {
- devices.markDeviceIDFree(devices.DeviceID)
- }
-
- if err := devices.removeTransactionMetaData(); err != nil {
- logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
- }
-
- return nil
-}
-
-func (devices *DeviceSet) processPendingTransaction() error {
- if err := devices.loadTransactionMetaData(); err != nil {
- return err
- }
-
- // If there was an open transaction but the pool transaction ID is
- // the same as the open transaction ID, there is nothing to roll back.
- if devices.TransactionID == devices.OpenTransactionID {
- return nil
- }
-
- // If the open transaction ID is less than the pool transaction ID,
- // something is wrong; log an error and carry on.
- if devices.OpenTransactionID < devices.TransactionID {
- logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID)
- return nil
- }
-
- // The pool transaction ID is not the same as the open transaction
- // ID: there is a transaction which was not completed.
- if err := devices.rollbackTransaction(); err != nil {
- return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err)
- }
-
- devices.OpenTransactionID = devices.TransactionID
- return nil
-}
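-
-// Summary of the three recovery cases above (illustrative sketch):
-//
-//	switch {
-//	case open == pool: // no transaction in flight; nothing to do
-//	case open < pool:  // inconsistent on-disk state; log and continue
-//	case open > pool:  // an interrupted create; roll it back
-//	}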
-
-func (devices *DeviceSet) loadDeviceSetMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
- if err != nil {
- // For backward compatibility return success if file does
- // not exist.
- if os.IsNotExist(err) {
- return nil
- }
- return err
- }
-
- return json.Unmarshal(jsonData, devices)
-}
-
-func (devices *DeviceSet) saveDeviceSetMetaData() error {
- jsonData, err := json.Marshal(devices)
- if err != nil {
- return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
- }
-
- return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile())
-}
-
-func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error {
- devices.allocateTransactionID()
- devices.DeviceIDHash = hash
- devices.DeviceID = DeviceID
- if err := devices.saveTransactionMetaData(); err != nil {
- return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
- }
- return nil
-}
-
-func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
- devices.DeviceID = DeviceID
- if err := devices.saveTransactionMetaData(); err != nil {
- return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
- }
- return nil
-}
-
-func (devices *DeviceSet) closeTransaction() error {
- if err := devices.updatePoolTransactionID(); err != nil {
- logrus.Debug("devmapper: Failed to close Transaction")
- return err
- }
- return nil
-}
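-
-// The full transaction lifecycle, as exercised by createRegisterDevice above
-// (sketch; error handling omitted):
-//
-//	_ = devices.openTransaction(hash, id) // persist intent, bump open ID
-//	_ = devices.refreshTransaction(newID) // optional: record a retried ID
-//	_ = devices.closeTransaction()        // commit the open ID to the pool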
-
-func determineDriverCapabilities(version string) error {
- // Kernel driver versions >= 4.27.0 support deferred removal.
-
- logrus.Debugf("devicemapper: kernel dm driver version is %s", version)
-
- versionSplit := strings.Split(version, ".")
- major, err := strconv.Atoi(versionSplit[0])
- if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0])
- }
-
- if major > 4 {
- driverDeferredRemovalSupport = true
- return nil
- }
-
- if major < 4 {
- return nil
- }
-
- minor, err := strconv.Atoi(versionSplit[1])
- if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1])
- }
-
- /*
- * If major is 4 and minor is 27, then there is no need to
- * check the patch level, as it cannot be less than 0.
- */
- if minor >= 27 {
- driverDeferredRemovalSupport = true
- return nil
- }
-
- return nil
-}
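-
-// Illustrative usage (hypothetical version strings):
-//
-//	_ = determineDriverCapabilities("4.27.0") // sets driverDeferredRemovalSupport
-//	_ = determineDriverCapabilities("4.26.8") // leaves it unset
-//	_ = determineDriverCapabilities("5.1.0")  // major > 4: sets it as well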
-
-// Determine the major and minor numbers of a loopback device.
-func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
- var stat unix.Stat_t
- err := unix.Stat(file.Name(), &stat)
- if err != nil {
- return 0, 0, err
- }
-
- dev := stat.Rdev
- majorNum := major(uint64(dev))
- minorNum := minor(uint64(dev))
-
- logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum)
- return majorNum, minorNum, nil
-}
-
-// Given a file which is the backing file of a loopback device, find the
-// loopback device name and its major/minor numbers.
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) {
- file, err := os.Open(filename)
- if err != nil {
- logrus.Debugf("devmapper: Failed to open file %s", filename)
- return "", 0, 0, err
- }
-
- defer file.Close()
- loopbackDevice := loopback.FindLoopDeviceFor(file)
- if loopbackDevice == nil {
- return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename)
- }
- defer loopbackDevice.Close()
-
- Major, Minor, err := getDeviceMajorMinor(loopbackDevice)
- if err != nil {
- return "", 0, 0, err
- }
- return loopbackDevice.Name(), Major, Minor, nil
-}
-
-// Get the major/minor numbers of thin pool data and metadata devices
-func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) {
- var params, poolDataMajMin, poolMetadataMajMin string
-
- _, _, _, params, err := devicemapper.GetTable(devices.getPoolName())
- if err != nil {
- return 0, 0, 0, 0, err
- }
-
- if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil {
- return 0, 0, 0, 0, err
- }
-
- logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin)
-
- poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":")
- poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32)
- if err != nil {
- return 0, 0, 0, 0, err
- }
-
- poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32)
- if err != nil {
- return 0, 0, 0, 0, err
- }
-
- poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":")
- poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32)
- if err != nil {
- return 0, 0, 0, 0, err
- }
-
- poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32)
- if err != nil {
- return 0, 0, 0, 0, err
- }
-
- return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil
-}
-
-func (devices *DeviceSet) loadThinPoolLoopBackInfo() error {
- poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin()
- if err != nil {
- return err
- }
-
- dirname := devices.loopbackDir()
-
- // The data device has not been passed in, so there should be a data
- // file which is being mounted as a loop device.
- if devices.dataDevice == "" {
- datafilename := path.Join(dirname, "data")
- dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename)
- if err != nil {
- return err
- }
-
- // Compare the two
- if poolDataMajor == dataMajor && poolDataMinor == dataMinor {
- devices.dataDevice = dataLoopDevice
- devices.dataLoopFile = datafilename
- }
-
- }
-
- // The metadata device has not been passed in, so there should be a
- // metadata file which is being mounted as a loop device.
- if devices.metadataDevice == "" {
- metadatafilename := path.Join(dirname, "metadata")
- metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename)
- if err != nil {
- return err
- }
- if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor {
- devices.metadataDevice = metadataLoopDevice
- devices.metadataLoopFile = metadatafilename
- }
- }
-
- return nil
-}
-
-func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
- // If the user asked for deferred removal, check that both the libdm
- // library and the kernel driver support it; otherwise error out.
- if enableDeferredRemoval {
- if !driverDeferredRemovalSupport {
- return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
- }
- if !devicemapper.LibraryDeferredRemovalSupport {
- return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
- }
- logrus.Debug("devmapper: Deferred removal support enabled.")
- devices.deferredRemove = true
- }
-
- if enableDeferredDeletion {
- if !devices.deferredRemove {
- return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
- }
- logrus.Debug("devmapper: Deferred deletion support enabled.")
- devices.deferredDelete = true
- }
- return nil
-}
-
-func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
- if err := devices.enableDeferredRemovalDeletion(); err != nil {
- return err
- }
-
- // https://github.com/docker/docker/issues/4036
- if supported := devicemapper.UdevSetSyncSupport(true); !supported {
- logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
-
- if !devices.overrideUdevSyncCheck {
- return graphdriver.ErrNotSupported
- }
- }
-
- // Create the root dir of the devmapper driver with ownership matching
- // this daemon's remapped root uid/gid so containers can start properly.
- uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
- if err != nil {
- return err
- }
- if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
- return err
- }
- if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
- return err
- }
-
- prevSetupConfig, err := readLVMConfig(devices.root)
- if err != nil {
- return err
- }
-
- if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
- if devices.thinPoolDevice != "" {
- return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
- }
-
- if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
- if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
- return errors.New("changing direct-lvm config is not supported")
- }
- logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
- if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
- return err
- }
- if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
- return err
- }
- if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
- return err
- }
- }
- devices.thinPoolDevice = "storage-thinpool"
- logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
- }
-
- // Set the device prefix from the device id and inode of the storage root dir
- var st unix.Stat_t
- if err := unix.Stat(devices.root, &st); err != nil {
- return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
- }
- // "reg-" stands for "regular file".
- // In the future we might use "dev-" for "device file", etc.
- // container-maj,min[-inode] stands for:
- // - Managed by container storage
- // - The target of this device is at major <maj> and minor <min>
- // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
- devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino)
- logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
-
- // Check for the existence of the thin-pool device
- poolExists, err := devices.thinPoolExists(devices.getPoolName())
- if err != nil {
- return err
- }
-
- // It seems libdevmapper opens this without O_CLOEXEC, and go exec will
- // not close files that are not close-on-exec, so we add this bad hack
- // to make sure it closes itself.
- setCloseOnExec("/dev/mapper/control")
-
- // Make sure the sparse images exist in <root>/devicemapper/data and
- // <root>/devicemapper/metadata
-
- createdLoopback := false
-
- // If the pool doesn't exist, create it
- if !poolExists && devices.thinPoolDevice == "" {
- logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
-
- var (
- dataFile *os.File
- metadataFile *os.File
- )
-
- fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir())
- if err != nil {
- return err
- }
- switch fsMagic {
- case graphdriver.FsMagicAufs:
- return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
- }
-
- if devices.dataDevice == "" {
- // Make sure the sparse images exist in <root>/devicemapper/data
-
- hasData := devices.hasImage("data")
-
- if !doInit && !hasData {
- return errors.New("loopback data file not found")
- }
-
- if !hasData {
- createdLoopback = true
- }
-
- data, err := devices.ensureImage("data", devices.dataLoopbackSize)
- if err != nil {
- logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
- return err
- }
-
- dataFile, err = loopback.AttachLoopDevice(data)
- if err != nil {
- return err
- }
- devices.dataLoopFile = data
- devices.dataDevice = dataFile.Name()
- } else {
- dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
- if err != nil {
- return err
- }
- }
- defer dataFile.Close()
-
- if devices.metadataDevice == "" {
- // Make sure the sparse images exist in <root>/devicemapper/metadata
-
- hasMetadata := devices.hasImage("metadata")
-
- if !doInit && !hasMetadata {
- return errors.New("loopback metadata file not found")
- }
-
- if !hasMetadata {
- createdLoopback = true
- }
-
- metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
- if err != nil {
- logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
- return err
- }
-
- metadataFile, err = loopback.AttachLoopDevice(metadata)
- if err != nil {
- return err
- }
- devices.metadataLoopFile = metadata
- devices.metadataDevice = metadataFile.Name()
- } else {
- metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
- if err != nil {
- return err
- }
- }
- defer metadataFile.Close()
-
- if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
- return err
- }
- defer func() {
- if retErr != nil {
- err = devices.deactivatePool()
- if err != nil {
- logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
- }
- }
- }()
- }
-
- // The pool already exists and the caller did not pass us a pool. That
- // means we probably created the pool earlier and could not remove it
- // because some containers were still using it. Detect some of the
- // pool's properties, such as whether it is using loop devices.
- if poolExists && devices.thinPoolDevice == "" {
- if err := devices.loadThinPoolLoopBackInfo(); err != nil {
- logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err)
- return err
- }
- }
-
- // If we didn't just create the data or metadata image, we need to
- // load the transaction id and migrate old metadata
- if !createdLoopback {
- if err := devices.initMetaData(); err != nil {
- return err
- }
- }
-
- if devices.thinPoolDevice == "" {
- if devices.metadataLoopFile != "" || devices.dataLoopFile != "" {
- logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.")
- }
- }
-
- // Right now this loads only NextDeviceID. If there is more metadata
- // down the line, we might have to move it earlier.
- if err := devices.loadDeviceSetMetaData(); err != nil {
- return err
- }
-
- // Setup the base image
- if doInit {
- if err := devices.setupBaseImage(); err != nil {
- logrus.Debugf("devmapper: Error device setupBaseImage: %s", err)
- return err
- }
- }
-
- return nil
-}
-
-// AddDevice adds a device and registers in the hash.
-func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
- logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash)
- defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash)
-
- // If a deleted device exists, return error.
- baseInfo, err := devices.lookupDeviceWithLock(baseHash)
- if err != nil {
- return err
- }
-
- if baseInfo.Deleted {
- return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash)
- }
-
- baseInfo.lock.Lock()
- defer baseInfo.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- // Also include deleted devices, in case the hash of the new device
- // is the same as that of one of the deleted devices.
- if info, _ := devices.lookupDevice(hash); info != nil {
- return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted)
- }
-
- size, err := devices.parseStorageOpt(storageOpt)
- if err != nil {
- return err
- }
-
- if size == 0 {
- size = baseInfo.Size
- }
-
- if size < baseInfo.Size {
- return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
- }
-
- if err := devices.takeSnapshot(hash, baseInfo, size); err != nil {
- return err
- }
-
- // Grow the container rootfs.
- if size > baseInfo.Size {
- info, err := devices.lookupDevice(hash)
- if err != nil {
- return err
- }
-
- if err := devices.growFS(info); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) {
- // Read size to change the block device size per container.
- for key, val := range storageOpt {
- key := strings.ToLower(key)
- switch key {
- case "size":
- size, err := units.RAMInBytes(val)
- if err != nil {
- return 0, err
- }
- return uint64(size), nil
- default:
- return 0, fmt.Errorf("Unknown option %s", key)
- }
- }
-
- return 0, nil
-}
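-
-// Illustrative usage (hypothetical value): units.RAMInBytes accepts
-// human-readable sizes with binary multipliers, so
-//
-//	size, _ := devices.parseStorageOpt(map[string]string{"size": "20G"})
-//	// size == 20 * 1024 * 1024 * 1024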
-
-func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
- // If device is already in deleted state, there is nothing to be done.
- if info.Deleted {
- return nil
- }
-
- logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash)
-
- info.Deleted = true
-
- // save device metadata to reflect deleted state.
- if err := devices.saveMetadata(info); err != nil {
- info.Deleted = false
- return err
- }
-
- devices.nrDeletedDevices++
- return nil
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error {
- err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
- if err != nil {
- // If syncDelete is true, we want to return an error. If deferred
- // deletion is not enabled, we return an error. If the error is
- // something other than EBUSY, return an error.
- if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
- logrus.Debugf("devmapper: Error deleting device: %s", err)
- return err
- }
- }
-
- if err == nil {
- if err := devices.unregisterDevice(info.Hash); err != nil {
- return err
- }
- // If device was already in deferred delete state that means
- // deletion was being tried again later. Reduce the deleted
- // device count.
- if info.Deleted {
- devices.nrDeletedDevices--
- }
- devices.markDeviceIDFree(info.DeviceID)
- } else {
- if err := devices.markForDeferredDeletion(info); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Issue discard only if device open count is zero.
-func (devices *DeviceSet) issueDiscard(info *devInfo) error {
- logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
- defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
- // This is a workaround for the kernel not discarding blocks on the
- // thin pool when we remove a thinp device, so we do it manually.
- // Even if the device is deferred-deleted, activate it and issue
- // discards.
- if err := devices.activateDeviceIfNeeded(info, true); err != nil {
- return err
- }
-
- devinfo, err := devicemapper.GetInfo(info.Name())
- if err != nil {
- return err
- }
-
- if devinfo.OpenCount != 0 {
- logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
- return nil
- }
-
- if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
- logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
- }
- return nil
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
- if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
- logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
- return err
- }
-
- defer devices.closeTransaction()
-
- if devices.doBlkDiscard {
- devices.issueDiscard(info)
- }
-
- // Try to deactivate device in case it is active.
- // If deferred removal is enabled and deferred deletion is disabled,
- // make sure the device is removed synchronously. There have been
- // cases of the device being busy for a short duration, and we would
- // rather busy-wait for device removal to take care of those cases.
- deferredRemove := devices.deferredRemove
- if !devices.deferredDelete {
- deferredRemove = false
- }
-
- if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
- logrus.Debugf("devmapper: Error deactivating device: %s", err)
- return err
- }
-
- if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-// DeleteDevice will return success if device has been marked for deferred
-// removal. If one wants to override that and want DeleteDevice() to fail if
-// device was busy and could not be deleted, set syncDelete=true.
-func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
- logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
- defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return err
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- return devices.deleteDevice(info, syncDelete)
-}
-
-func (devices *DeviceSet) deactivatePool() error {
- logrus.Debug("devmapper: deactivatePool() START")
- defer logrus.Debug("devmapper: deactivatePool() END")
- devname := devices.getPoolDevName()
-
- devinfo, err := devicemapper.GetInfo(devname)
- if err != nil {
- return err
- }
-
- if devinfo.Exists == 0 {
- return nil
- }
- if err := devicemapper.RemoveDevice(devname); err != nil {
- return err
- }
-
- if d, err := devicemapper.GetDeps(devname); err == nil && d.Count > 0 {
- logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
- }
-
- return nil
-}
-
-func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
- return devices.deactivateDeviceMode(info, devices.deferredRemove)
-}
-
-func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
- var err error
- logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
- defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
-
- devinfo, err := devicemapper.GetInfo(info.Name())
- if err != nil {
- return err
- }
-
- if devinfo.Exists == 0 {
- return nil
- }
-
- if deferredRemove {
- err = devicemapper.RemoveDeviceDeferred(info.Name())
- } else {
- err = devices.removeDevice(info.Name())
- }
-
- // This function's semantics are such that it does not return an
- // error if the device does not exist. So if the device went away by
- // the time we actually tried to remove it, do not return an error.
- if errors.Cause(err) != devicemapper.ErrEnxio {
- return err
- }
- return nil
-}
-
-// Issues the underlying dm remove operation.
-func (devices *DeviceSet) removeDevice(devname string) error {
- var err error
-
- logrus.Debugf("devmapper: removeDevice START(%s)", devname)
- defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
-
- for i := 0; i < 200; i++ {
- err = devicemapper.RemoveDevice(devname)
- if err == nil {
- break
- }
- if errors.Cause(err) != devicemapper.ErrBusy {
- return err
- }
-
- // If we see EBUSY it may be a transient error,
- // so sleep a bit and retry a few times.
- devices.Unlock()
- time.Sleep(100 * time.Millisecond)
- devices.Lock()
- }
-
- return err
-}
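-
-// Note (added annotation): with up to 200 attempts and a 100ms sleep after
-// each EBUSY, removeDevice busy-waits for at most roughly 20 seconds before
-// returning the last EBUSY error.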
-
-func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
- if !devices.deferredRemove {
- return nil
- }
-
- logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
- defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
-
- devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
- if err != nil {
- return err
- }
-
- if devinfo != nil && devinfo.DeferredRemove == 0 {
- return nil
- }
-
- // Cancel deferred remove
- if err := devices.cancelDeferredRemoval(info); err != nil {
- // If the error is ErrEnxio, the device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
- return err
- }
- }
- return nil
-}
-
-func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
- logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
- defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
-
- var err error
-
- // Cancel deferred remove
- for i := 0; i < 100; i++ {
- err = devicemapper.CancelDeferredRemove(info.Name())
- if err != nil {
- if errors.Cause(err) == devicemapper.ErrBusy {
- // If we see EBUSY it may be a transient error,
- // so sleep a bit and retry a few times.
- devices.Unlock()
- time.Sleep(100 * time.Millisecond)
- devices.Lock()
- continue
- }
- }
- break
- }
- return err
-}
-
-func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
- files, err := ioutil.ReadDir(dir)
- if err != nil {
- logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
- return
- }
-
- for _, d := range files {
- if !d.IsDir() {
- continue
- }
-
- name := d.Name()
- fullname := path.Join(dir, name)
-
- // We use MNT_DETACH here in case it is still busy in some running
- // container. This means it'll go away from the global scope directly,
- // and the device will be released when that container dies.
- if err := mount.Unmount(fullname); err != nil {
- logrus.Warnf("devmapper.Shutdown error: %s", err)
- }
-
- if devInfo, err := devices.lookupDevice(name); err != nil {
- logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err)
- } else {
- if err := devices.deactivateDevice(devInfo); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
- }
- }
- }
-}
-
-// Shutdown shuts down the device by unmounting the root.
-func (devices *DeviceSet) Shutdown(home string) error {
- logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
- logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
- defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
-
- // Stop the deletion worker. This stops delivery of new events to
- // the ticker channel, so no new instance of cleanupDeletedDevice()
- // will run after this call. If one instance is already running at
- // the time of the call, it must be holding devices.Lock() and
- // we will block on this lock till cleanup function exits.
- devices.deletionWorkerTicker.Stop()
-
- devices.Lock()
- // Save DeviceSet metadata first. Docker kills all threads if they
- // don't finish within a certain time. It is possible that Shutdown()
- // does not finish in time as we loop trying to deactivate devices
- // while they are busy. In that case the Shutdown() routine will be
- // killed and we would not get a chance to save the device-set
- // metadata. Hence save it early, before trying to deactivate devices.
- devices.saveDeviceSetMetaData()
- devices.unmountAndDeactivateAll(path.Join(home, "mnt"))
- devices.Unlock()
-
- info, _ := devices.lookupDeviceWithLock("")
- if info != nil {
- info.lock.Lock()
- devices.Lock()
- if err := devices.deactivateDevice(info); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
- }
- devices.Unlock()
- info.lock.Unlock()
- }
-
- devices.Lock()
- if devices.thinPoolDevice == "" {
- if err := devices.deactivatePool(); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
- }
- }
- devices.Unlock()
-
- return nil
-}
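-
-// Shutdown ordering recap (added annotation, derived from the code above):
-// stop the deletion worker, save the device-set metadata while it is still
-// safe to do so, unmount and deactivate everything under <home>/mnt, then
-// deactivate the base image device and finally the thin pool (only when no
-// external dm.thinpooldev was configured).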
-
-// Recent XFS changes allow changing the behavior of the filesystem on errors.
-// When the thin pool fills up and XFS gets an ENOSPC error, it currently
-// retries the IO indefinitely, which can block the container process so that
-// it cannot be killed. With a value of 0, XFS will not retry upon ENOSPC
-// and will instead shut down the filesystem.
-
-func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
- dmDevicePath, err := os.Readlink(info.DevName())
- if err != nil {
- return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
- }
-
- dmDeviceName := path.Base(dmDevicePath)
- filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
- maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
- if err != nil {
- return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
- }
- defer maxRetriesFile.Close()
-
- // Write the configured retry count ("0" disables retries entirely).
- _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
- if err != nil {
- return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
- }
- return nil
-}
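-
-// Example (hypothetical device name): if info.DevName() resolves to
-// /dev/dm-3, the knob written above is
-//
-//	/sys/fs/xfs/dm-3/error/metadata/ENOSPC/max_retries
-//
-// and writing "0" makes XFS shut down the filesystem on ENOSPC instead of
-// retrying the IO forever.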
-
-// MountDevice mounts the device if not already mounted.
-func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return err
- }
-
- if info.Deleted {
- return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
- }
-
- fstype, err := ProbeFsType(info.DevName())
- if err != nil {
- return err
- }
-
- options := ""
-
- if fstype == xfs {
- // XFS needs nouuid or it can't mount two filesystems with the same UUID.
- options = joinMountOptions(options, "nouuid")
- }
-
- mountOptions := devices.mountOptions
- if len(moptions.Options) > 0 {
- addNouuid := strings.Contains(mountOptions, "nouuid")
- mountOptions = strings.Join(moptions.Options, ",")
- if addNouuid {
- mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
- }
- }
-
- options = joinMountOptions(options, mountOptions)
- options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
-
- if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
- }
-
- if fstype == xfs && devices.xfsNospaceRetries != "" {
- if err := devices.xfsSetNospaceRetries(info); err != nil {
- if err := mount.Unmount(path); err != nil {
- logrus.Warnf("devmapper.MountDevice cleanup error: %v", err)
- }
- devices.deactivateDevice(info)
- return err
- }
- }
-
- return nil
-}
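-
-// Worked example (hypothetical values): mounting an XFS thin device with
-// devices.mountOptions == "discard" while the caller passes Options == ["ro"]:
-// the caller's options replace the configured defaults, "nouuid" is added
-// for XFS, and the final string handed to mount.Mount is "nouuid,ro".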
-
-// UnmountDevice unmounts the device and removes it from hash.
-func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
- logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
- defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)
-
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return err
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- logrus.Debugf("devmapper: Unmount(%s)", mountPath)
- if err := mount.Unmount(mountPath); err != nil {
- if ok, _ := Mounted(mountPath); ok {
- return err
- }
- }
- logrus.Debug("devmapper: Unmount done")
-
- // Remove the mountpoint here. Removing the mountpoint (in newer kernels)
- // will cause all other instances of this mount in other mount namespaces
- // to be killed (this is an anti-DoS measure that is necessary for things
- // like devicemapper). This is necessary to avoid cases where a libdm mount
- // that is present in another namespace will cause subsequent RemoveDevice
- // operations to fail. We ignore any errors here because this may fail on
- // older kernels which don't have
- // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
- if err := os.Remove(mountPath); err != nil {
- logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err)
- }
-
- return devices.deactivateDevice(info)
-}
-
-// HasDevice returns true if the device metadata exists.
-func (devices *DeviceSet) HasDevice(hash string) bool {
- info, _ := devices.lookupDeviceWithLock(hash)
- return info != nil
-}
-
-// List returns a list of device ids.
-func (devices *DeviceSet) List() []string {
- devices.Lock()
- defer devices.Unlock()
-
- ids := make([]string, len(devices.Devices))
- i := 0
- for k := range devices.Devices {
- ids[i] = k
- i++
- }
- return ids
-}
-
-func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) {
- var params string
- _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName)
- if err != nil {
- return
- }
- if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil {
- return
- }
- return
-}
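-
-// The params string parsed above comes from the dm "thin" target's status
-// line, which per the kernel thin-provisioning documentation has the form
-// "<nr mapped sectors> <highest mapped sector>"; e.g. params == "20480 20479"
-// means 20480 sectors are mapped and sector 20479 is the highest mapped one.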
-
-// GetDeviceStatus provides the size, mapped sectors, and highest mapped
-// sector for the device.
-func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return nil, err
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- devices.Lock()
- defer devices.Unlock()
-
- status := &DevStatus{
- DeviceID: info.DeviceID,
- Size: info.Size,
- TransactionID: info.TransactionID,
- }
-
- if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
- }
-
- sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName())
-
- if err != nil {
- return nil, err
- }
-
- status.SizeInSectors = sizeInSectors
- status.MappedSectors = mappedSectors
- status.HighestMappedSector = highestMappedSector
-
- return status, nil
-}
-
-func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) {
- var params string
- if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil {
- _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal)
- }
- return
-}
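-
-// The thin-pool status params parsed above follow the kernel format
-// "<transaction id> <used metadata blocks>/<total metadata blocks>
-// <used data blocks>/<total data blocks> ...", so params such as
-// "1 213/4161600 4410/2097152" report transaction 1, 213 of 4161600
-// metadata blocks used, and 4410 of 2097152 data blocks used.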
-
-// DataDevicePath returns the path to the data storage for this deviceset,
-// regardless of loopback or block device
-func (devices *DeviceSet) DataDevicePath() string {
- return devices.dataDevice
-}
-
-// MetadataDevicePath returns the path to the metadata storage for this deviceset,
-// regardless of loopback or block device
-func (devices *DeviceSet) MetadataDevicePath() string {
- return devices.metadataDevice
-}
-
-func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
- buf := new(unix.Statfs_t)
- if err := unix.Statfs(loopFile, buf); err != nil {
- logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
- return 0, err
- }
- return buf.Bfree * uint64(buf.Bsize), nil
-}
-
-func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
- if loopFile != "" {
- fi, err := os.Stat(loopFile)
- if err != nil {
- logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err)
- return false, err
- }
- return fi.Mode().IsRegular(), nil
- }
- return false, nil
-}
-
-// Status returns the current status of this deviceset
-func (devices *DeviceSet) Status() *Status {
- devices.Lock()
- defer devices.Unlock()
-
- status := &Status{}
-
- status.PoolName = devices.getPoolName()
- status.DataFile = devices.DataDevicePath()
- status.DataLoopback = devices.dataLoopFile
- status.MetadataFile = devices.MetadataDevicePath()
- status.MetadataLoopback = devices.metadataLoopFile
- status.UdevSyncSupported = devicemapper.UdevSyncSupported()
- status.DeferredRemoveEnabled = devices.deferredRemove
- status.DeferredDeleteEnabled = devices.deferredDelete
- status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
- status.BaseDeviceSize = devices.getBaseDeviceSize()
- status.BaseDeviceFS = devices.getBaseDeviceFS()
-
- totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
- if err == nil {
- // Convert from blocks to bytes
- blockSizeInSectors := totalSizeInSectors / dataTotal
-
- status.Data.Used = dataUsed * blockSizeInSectors * 512
- status.Data.Total = dataTotal * blockSizeInSectors * 512
- status.Data.Available = status.Data.Total - status.Data.Used
-
- // metadata blocks are always 4k
- status.Metadata.Used = metadataUsed * 4096
- status.Metadata.Total = metadataTotal * 4096
- status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
-
- status.SectorSize = blockSizeInSectors * 512
-
- if check, _ := devices.isRealFile(devices.dataLoopFile); check {
- actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
- if err == nil && actualSpace < status.Data.Available {
- status.Data.Available = actualSpace
- }
- }
-
- if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
- actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
- if err == nil && actualSpace < status.Metadata.Available {
- status.Metadata.Available = actualSpace
- }
- }
-
- minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
- status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
- }
-
- return status
-}
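-
-// Worked example of the conversion above (hypothetical numbers): with
-// totalSizeInSectors = 209715200 (100 GiB) and dataTotal = 1638400 blocks,
-// blockSizeInSectors = 209715200 / 1638400 = 128 (64 KiB blocks), so
-// dataUsed = 102400 blocks yields Data.Used = 102400 * 128 * 512 =
-// 6710886400 bytes.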
-
-// exportDeviceMetadata returns the device ID, size, and name for the
-// device with the given hash.
-func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return nil, err
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
- return metadata, nil
-}
-
-// NewDeviceSet creates the device set based on the options provided.
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) {
- devicemapper.SetDevDir("/dev")
-
- devices := &DeviceSet{
- root: root,
- metaData: metaData{Devices: make(map[string]*devInfo)},
- dataLoopbackSize: defaultDataLoopbackSize,
- metaDataLoopbackSize: defaultMetaDataLoopbackSize,
- baseFsSize: defaultBaseFsSize,
- overrideUdevSyncCheck: defaultUdevSyncOverride,
- doBlkDiscard: true,
- thinpBlockSize: defaultThinpBlockSize,
- deviceIDMap: make([]byte, deviceIDMapSz),
- deletionWorkerTicker: time.NewTicker(time.Second * 30),
- uidMaps: uidMaps,
- gidMaps: gidMaps,
- minFreeSpacePercent: defaultMinFreeSpacePercent,
- }
-
- version, err := devicemapper.GetDriverVersion()
- if err != nil {
- // Can't even get driver version, assume not supported
- return nil, graphdriver.ErrNotSupported
- }
-
- if err := determineDriverCapabilities(version); err != nil {
- return nil, graphdriver.ErrNotSupported
- }
-
- if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
- // Enable deferred removal and deferred deletion by default.
- enableDeferredDeletion = true
- enableDeferredRemoval = true
- }
-
- foundBlkDiscard := false
- var lvmSetupConfig directLVMConfig
- testMode := false
- for _, option := range options {
- key, val, err := parsers.ParseKeyValueOpt(option)
- if err != nil {
- return nil, err
- }
- key = strings.ToLower(key)
- switch key {
- case "dm.basesize":
- size, err := units.RAMInBytes(val)
- if err != nil {
- return nil, err
- }
- userBaseSize = true
- devices.baseFsSize = uint64(size)
- case "dm.loopdatasize":
- size, err := units.RAMInBytes(val)
- if err != nil {
- return nil, err
- }
- devices.dataLoopbackSize = size
- case "dm.loopmetadatasize":
- size, err := units.RAMInBytes(val)
- if err != nil {
- return nil, err
- }
- devices.metaDataLoopbackSize = size
- case "dm.fs":
- if val != ext4 && val != xfs {
- return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val)
- }
- devices.filesystem = val
- case "dm.mkfsarg":
- devices.mkfsArgs = append(devices.mkfsArgs, val)
- case "dm.mountopt", "devicemapper.mountopt":
- devices.mountOptions = joinMountOptions(devices.mountOptions, val)
- case "dm.metadatadev":
- devices.metadataDevice = val
- case "dm.metadata_size":
- devices.metaDataSize = val
- case "dm.datadev":
- devices.dataDevice = val
- case "dm.thinpooldev":
- devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
- case "dm.blkdiscard":
- foundBlkDiscard = true
- devices.doBlkDiscard, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
- case "dm.blocksize":
- size, err := units.RAMInBytes(val)
- if err != nil {
- return nil, err
- }
- // convert to 512b sectors
- devices.thinpBlockSize = uint32(size) >> 9
- case "dm.override_udev_sync_check":
- devices.overrideUdevSyncCheck, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
-
- case "dm.use_deferred_removal":
- enableDeferredRemoval, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
-
- case "dm.use_deferred_deletion":
- enableDeferredDeletion, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
-
- case "dm.metaDataSize":
- lvmSetupConfig.MetaDataSize = val
- case "dm.min_free_space":
- if !strings.HasSuffix(val, "%") {
- return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")
- }
-
- valstring := strings.TrimSuffix(val, "%")
- minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32)
- if err != nil {
- return nil, err
- }
-
- if minFreeSpacePercent >= 100 {
- return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
- }
-
- devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
- case "dm.xfs_nospace_max_retries":
- _, err := strconv.ParseUint(val, 10, 64)
- if err != nil {
- return nil, err
- }
- devices.xfsNospaceRetries = val
- case "dm.directlvm_device":
- lvmSetupConfig.Device = val
- case "dm.directlvm_device_force":
- lvmSetupConfigForce, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
- case "dm.thinp_percent":
- per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
- }
- if per >= 100 {
- return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
- }
- lvmSetupConfig.ThinpPercent = per
- case "dm.thinp_metapercent":
- per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
- }
- if per >= 100 {
- return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
- }
- lvmSetupConfig.ThinpMetaPercent = per
- case "dm.thinp_autoextend_percent":
- per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
- }
- if per > 100 {
- return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100")
- }
- lvmSetupConfig.AutoExtendPercent = per
- case "dm.thinp_autoextend_threshold":
- per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
- }
- if per > 100 {
- return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
- }
- lvmSetupConfig.AutoExtendThreshold = per
- case "dm.libdm_log_level":
- level, err := strconv.ParseInt(val, 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
- }
- if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
- return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
- }
- // Register a new logging callback with the specified level.
- devicemapper.LogInit(devicemapper.DefaultLogger{
- Level: int(level),
- })
- case "test":
- testMode, err = strconv.ParseBool(val)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("devmapper: Unknown option %s", key)
- }
- }
-
- if !testMode {
- if err := validateLVMConfig(lvmSetupConfig); err != nil {
- return nil, err
- }
- }
-
- devices.lvmSetupConfig = lvmSetupConfig
-
- // By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive.
- if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
- devices.doBlkDiscard = false
- }
-
- if err := devices.initDevmapper(doInit); err != nil {
- return nil, err
- }
-
- return devices, nil
-}
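-
-// Example option set (hypothetical, for illustration only): a caller might
-// hand NewDeviceSet options such as
-//
-//	["dm.thinpooldev=/dev/mapper/thin-pool", "dm.use_deferred_removal=true",
-//	 "dm.use_deferred_deletion=true", "dm.fs=xfs", "dm.basesize=20G"]
-//
-// which selects an existing thin pool, enables the deferred code paths
-// parsed above, and requests a 20 GiB base filesystem size.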
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
deleted file mode 100644
index 418b9e610..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// +build linux,cgo
-
-package devmapper
-
-// Definition of struct dm_task and sub structures (from lvm2)
-//
-// struct dm_ioctl {
-// /*
-// * The version number is made up of three parts:
-// * major - no backward or forward compatibility,
-// * minor - only backwards compatible,
-// * patch - both backwards and forwards compatible.
-// *
-// * All clients of the ioctl interface should fill in the
-// * version number of the interface that they were
-// * compiled with.
-// *
-// * All recognized ioctl commands (ie. those that don't
-// * return -ENOTTY) fill out this field, even if the
-// * command failed.
-// */
-// uint32_t version[3]; /* in/out */
-// uint32_t data_size; /* total size of data passed in
-// * including this struct */
-
-// uint32_t data_start; /* offset to start of data
-// * relative to start of this struct */
-
-// uint32_t target_count; /* in/out */
-// int32_t open_count; /* out */
-// uint32_t flags; /* in/out */
-
-// /*
-// * event_nr holds either the event number (input and output) or the
-// * udev cookie value (input only).
-// * The DM_DEV_WAIT ioctl takes an event number as input.
-// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
-// * use the field as a cookie to return in the DM_COOKIE
-// * variable with the uevents they issue.
-// * For output, the ioctls return the event number, not the cookie.
-// */
-// uint32_t event_nr; /* in/out */
-// uint32_t padding;
-
-// uint64_t dev; /* in/out */
-
-// char name[DM_NAME_LEN]; /* device name */
-// char uuid[DM_UUID_LEN]; /* unique identifier for
-// * the block device */
-// char data[7]; /* padding or data */
-// };
-
-// struct target {
-// uint64_t start;
-// uint64_t length;
-// char *type;
-// char *params;
-
-// struct target *next;
-// };
-
-// typedef enum {
-// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
-// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
-// } dm_add_node_t;
-
-// struct dm_task {
-// int type;
-// char *dev_name;
-// char *mangled_dev_name;
-
-// struct target *head, *tail;
-
-// int read_only;
-// uint32_t event_nr;
-// int major;
-// int minor;
-// int allow_default_major_fallback;
-// uid_t uid;
-// gid_t gid;
-// mode_t mode;
-// uint32_t read_ahead;
-// uint32_t read_ahead_flags;
-// union {
-// struct dm_ioctl *v4;
-// } dmi;
-// char *newname;
-// char *message;
-// char *geometry;
-// uint64_t sector;
-// int no_flush;
-// int no_open_count;
-// int skip_lockfs;
-// int query_inactive_table;
-// int suppress_identical_reload;
-// dm_add_node_t add_node;
-// uint64_t existing_table_size;
-// int cookie_set;
-// int new_uuid;
-// int secure_data;
-// int retry_remove;
-// int enable_checks;
-// int expected_errno;
-
-// char *uuid;
-// char *mangled_uuid;
-// };
-//
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
deleted file mode 100644
index d2f165e26..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// +build linux,cgo
-
-package devmapper
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "strconv"
-
- graphdriver "github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/devicemapper"
- "github.com/containers/storage/pkg/directory"
- "github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/locker"
- "github.com/containers/storage/pkg/mount"
- units "github.com/docker/go-units"
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
-)
-
-const defaultPerms = os.FileMode(0555)
-
-func init() {
- graphdriver.Register("devicemapper", Init)
-}
-
-// Driver contains the device set mounted and the home directory
-type Driver struct {
- *DeviceSet
- home string
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
- ctr *graphdriver.RefCounter
- locker *locker.Locker
-}
-
-// Init creates a driver with the given home and the set of options.
-func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
- deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps)
- if err != nil {
- return nil, err
- }
-
- if err := mount.MakePrivate(home); err != nil {
- return nil, err
- }
-
- d := &Driver{
- DeviceSet: deviceSet,
- home: home,
- uidMaps: options.UIDMaps,
- gidMaps: options.GIDMaps,
- ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
- locker: locker.New(),
- }
-
- return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
-}
-
-func (d *Driver) String() string {
- return "devicemapper"
-}
-
-// Status returns the status about the driver in a printable format.
-// Information returned contains Pool Name, Data File, Metadata file, disk usage by
-// the data and metadata, etc.
-func (d *Driver) Status() [][2]string {
- s := d.DeviceSet.Status()
-
- status := [][2]string{
- {"Pool Name", s.PoolName},
- {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
- {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
- {"Backing Filesystem", s.BaseDeviceFS},
- {"Data file", s.DataFile},
- {"Metadata file", s.MetadataFile},
- {"Data Space Used", units.HumanSize(float64(s.Data.Used))},
- {"Data Space Total", units.HumanSize(float64(s.Data.Total))},
- {"Data Space Available", units.HumanSize(float64(s.Data.Available))},
- {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
- {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
- {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
- {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
- {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
- {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
- {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
- {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
- }
- if len(s.DataLoopback) > 0 {
- status = append(status, [2]string{"Data loop file", s.DataLoopback})
- }
- if len(s.MetadataLoopback) > 0 {
- status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
- }
- if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
- status = append(status, [2]string{"Library Version", vStr})
- }
- return status
-}
-
-// Metadata returns a map of information about the device.
-func (d *Driver) Metadata(id string) (map[string]string, error) {
- m, err := d.DeviceSet.exportDeviceMetadata(id)
-
- if err != nil {
- return nil, err
- }
-
- metadata := make(map[string]string)
- metadata["DeviceId"] = strconv.Itoa(m.deviceID)
- metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
- metadata["DeviceName"] = m.deviceName
- return metadata, nil
-}
-
-// Cleanup unmounts a device.
-func (d *Driver) Cleanup() error {
- err := d.DeviceSet.Shutdown(d.home)
-
- umountErr := mount.Unmount(d.home)
- // in case we have two errors, prefer the one from Shutdown()
- if err != nil {
- return err
- }
-
- return umountErr
-}
-
-// CreateFromTemplate creates a layer with the same contents and parent as another layer.
-func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
- return d.Create(id, template, opts)
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system.
-func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
- return d.Create(id, parent, opts)
-}
-
-// Create adds a device with a given id and the parent.
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
- var storageOpt map[string]string
- if opts != nil {
- storageOpt = opts.StorageOpt
- }
-
- if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
- return err
- }
-
- return nil
-}
-
-// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point.
-func (d *Driver) Remove(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- if !d.DeviceSet.HasDevice(id) {
- // Consider removing a non-existing device a no-op
- // This is useful to be able to progress on container removal
- // if the underlying device has gone away due to earlier errors
- return nil
- }
-
- // This assumes the device has been properly paired with Get/Put calls and is thus unmounted
- if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
- return fmt.Errorf("failed to remove device %s: %v", id, err)
- }
-
- // Most probably the mount point is already removed on Put()
- // (see DeviceSet.UnmountDevice()), but just in case it was not
- // let's try to remove it here as well, ignoring errors as
- // an older kernel can return EBUSY if e.g. the mount was leaked
- // to other mount namespaces. A failure to remove the container's
- // mount point is not important and should not be treated
- // as a failure to remove the container.
- mp := path.Join(d.home, "mnt", id)
- err := unix.Rmdir(mp)
- if err != nil && !os.IsNotExist(err) {
- logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err)
- }
-
- return nil
-}
-
-// Get mounts a device with given id into the root filesystem
-func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- mp := path.Join(d.home, "mnt", id)
- rootFs := path.Join(mp, "rootfs")
- if count := d.ctr.Increment(mp); count > 1 {
- return rootFs, nil
- }
-
- uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- d.ctr.Decrement(mp)
- return "", err
- }
-
- // Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
- d.ctr.Decrement(mp)
- return "", err
- }
- if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
- d.ctr.Decrement(mp)
- return "", err
- }
-
- // Mount the device
- if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
- d.ctr.Decrement(mp)
- return "", err
- }
-
- if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil {
- d.ctr.Decrement(mp)
- d.DeviceSet.UnmountDevice(id, mp)
- return "", err
- }
-
- idFile := path.Join(mp, "id")
- if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
- // Create an "id" file with the container/image id in it to help reconstruct this in case
- // of later problems
- if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
- d.ctr.Decrement(mp)
- d.DeviceSet.UnmountDevice(id, mp)
- return "", err
- }
- }
-
- return rootFs, nil
-}
-
-// Put unmounts a device and removes it.
-func (d *Driver) Put(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- mp := path.Join(d.home, "mnt", id)
- if count := d.ctr.Decrement(mp); count > 0 {
- return nil
- }
-
- err := d.DeviceSet.UnmountDevice(id, mp)
- if err != nil {
- logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err)
- }
-
- return err
-}
-
-// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
-// For devmapper, it queries the mnt path for this ID.
-func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- return directory.Usage(path.Join(d.home, "mnt", id))
-}
-
-// Exists checks to see if the device exists.
-func (d *Driver) Exists(id string) bool {
- return d.DeviceSet.HasDevice(id)
-}
-
-// AdditionalImageStores returns additional image stores supported by the driver
-func (d *Driver) AdditionalImageStores() []string {
- return nil
-}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
deleted file mode 100644
index 54db6ab4a..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package devmapper
-
-import jsoniter "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go
deleted file mode 100644
index 41e73faf5..000000000
--- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// +build linux,cgo
-
-package devmapper
-
-import (
- "bytes"
- "fmt"
- "os"
- "path/filepath"
-
- "golang.org/x/sys/unix"
-)
-
-// FIXME: this is copy-pasted from the aufs driver.
-// It should be moved into the core.
-
-// Mounted returns true if a mount point exists.
-func Mounted(mountpoint string) (bool, error) {
- var mntpointSt unix.Stat_t
- if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, err
- }
- var parentSt unix.Stat_t
- if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
- return false, err
- }
- return mntpointSt.Dev != parentSt.Dev, nil
-}
-
-type probeData struct {
- fsName string
- magic string
- offset uint64
-}
-
-// ProbeFsType returns the filesystem name for the given device id.
-func ProbeFsType(device string) (string, error) {
- probes := []probeData{
- {"btrfs", "_BHRfS_M", 0x10040},
- {"ext4", "\123\357", 0x438},
- {"xfs", "XFSB", 0},
- }
-
- maxLen := uint64(0)
- for _, p := range probes {
- l := p.offset + uint64(len(p.magic))
- if l > maxLen {
- maxLen = l
- }
- }
-
- file, err := os.Open(device)
- if err != nil {
- return "", err
- }
- defer file.Close()
-
- buffer := make([]byte, maxLen)
- l, err := file.Read(buffer)
- if err != nil {
- return "", err
- }
-
- if uint64(l) != maxLen {
- return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
- }
-
- for _, p := range probes {
- if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
- return p.fsName, nil
- }
- }
-
- return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
-}
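-
-// Note (added annotation): the ext4 magic above, "\123\357", is the octal
-// escape for bytes 0x53 0xEF, i.e. the little-endian encoding of the ext
-// superblock magic 0xEF53 stored at offset 0x438.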
-
-func joinMountOptions(a, b string) string {
- if a == "" {
- return b
- }
- if b == "" {
- return a
- }
- return a + "," + b
-}
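-
-// Example: joinMountOptions("nouuid", "") == "nouuid", and
-// joinMountOptions("nouuid", "ro,discard") == "nouuid,ro,discard".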
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 770b431bd..dc9b12c3c 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -1,20 +1,20 @@
package graphdriver
import (
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/vbatts/tar-split/tar/storage"
-
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
)
// FsMagic unsigned id of the filesystem in use.
@@ -39,7 +39,7 @@ var (
ErrLayerUnknown = errors.New("unknown layer")
)
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string
@@ -48,13 +48,13 @@ type CreateOpts struct {
ignoreChownErrors bool
}
-// MountOpts contains optional arguments for LayerStope.Mount() methods.
+// MountOpts contains optional arguments for Driver.Get() methods.
type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
- UidMaps []idtools.IDMap // nolint: golint
- GidMaps []idtools.IDMap // nolint: golint
+ UidMaps []idtools.IDMap //nolint: revive,golint
+ GidMaps []idtools.IDMap //nolint: revive,golint
Options []string
// Volatile specifies whether the container storage can be optimized
@@ -74,6 +74,13 @@ type ApplyDiffOpts struct {
ForceMask *os.FileMode
}
+// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods.
+type ApplyDiffWithDifferOpts struct {
+ ApplyDiffOpts
+
+ Flags map[string]interface{}
+}
+
// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)
@@ -110,6 +117,13 @@ type ProtoDriver interface {
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
+ // Returns a list of layer ids that exist on this driver (does not include
+ // additional storage layers). Not supported by all backends.
+ // If the driver requires that layers be removed in a particular order,
+ // usually due to parent-child relationships that it cares about, the
+ // list should be sorted well enough so that if all layers need to be
+ // removed, they can be removed in the order in which they're returned.
+ ListLayers() ([]string, error)
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
@@ -175,18 +189,64 @@ type Driver interface {
type DriverWithDifferOutput struct {
Differ Differ
Target string
- Size int64
+ Size int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
UIDs []uint32
GIDs []uint32
UncompressedDigest digest.Digest
+ CompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
+ TarSplit []byte // nil if not available
+ TOCDigest digest.Digest
+ // RootDirMode is the mode of the root directory of the layer, if specified.
+ RootDirMode *os.FileMode
+ // Artifacts is a collection of additional artifacts
+ // generated by the differ that the storage driver can use.
+ Artifacts map[string]interface{}
+}
+
+type DifferOutputFormat int
+
+const (
+ // DifferOutputFormatDir means the output is a directory and it will
+ // keep the original layout.
+ DifferOutputFormatDir = iota
+ // DifferOutputFormatFlat will store the files by their checksum, in the form
+ // checksum[0:2]/checksum[2:]
+ DifferOutputFormatFlat
+)
+
+// DifferFsVerity is a part of the experimental Differ interface and should not be used from outside of c/storage.
+// It configures the fsverity requirement.
+type DifferFsVerity int
+
+const (
+ // DifferFsVerityDisabled means no fs-verity is used
+ DifferFsVerityDisabled = iota
+
+ // DifferFsVerityIfAvailable means fs-verity is used when supported by
+ // the underlying kernel and filesystem.
+ DifferFsVerityIfAvailable
+
+ // DifferFsVerityRequired means fs-verity is required. Note this is not
+ // currently set or exposed by the overlay driver.
+ DifferFsVerityRequired
+)
+
+// DifferOptions is a part of the experimental Differ interface and should not be used from outside of c/storage.
+// It overrides how the differ works.
+type DifferOptions struct {
+ // Format defines the destination directory layout format
+ Format DifferOutputFormat
+
+ // UseFsVerity defines whether fs-verity is used
+ UseFsVerity DifferFsVerity
}
// Differ defines the interface for using a custom differ.
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
- ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
+ ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
}
// DriverWithDiffer is the interface for direct diff access.
@@ -194,10 +254,10 @@ type Differ interface {
type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
- // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
- ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
- // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
- ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
+ // The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+ ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
+ // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
+ ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.
@@ -221,7 +281,7 @@ type CapabilityDriver interface {
Capabilities() Capabilities
}
-// AdditionalLayer reprents a layer that is stored in the additional layer store
+// AdditionalLayer represents a layer that is stored in the additional layer store
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayer interface {
// CreateAs creates a new layer from this additional layer
@@ -244,8 +304,8 @@ type AdditionalLayerStoreDriver interface {
Driver
// LookupAdditionalLayer looks up additional layer store by the specified
- // digest and ref and returns an object representing that layer.
- LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+ // TOC digest and ref and returns an object representing that layer.
+ LookupAdditionalLayer(tocDigest digest.Digest, ref string) (AdditionalLayer, error)
// LookupAdditionalLayer looks up additional layer store by the specified
// ID and returns an object representing that layer.
@@ -279,10 +339,18 @@ func init() {
drivers = make(map[string]InitFunc)
}
+// MustRegister registers an InitFunc for the driver, or panics.
+// It is suitable for package’s init() sections.
+func MustRegister(name string, initFunc InitFunc) {
+ if err := Register(name, initFunc); err != nil {
+ panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err))
+ }
+}
+
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
- return fmt.Errorf("Name already registered %s", name)
+ return fmt.Errorf("name already registered %s", name)
}
drivers[name] = initFunc
@@ -296,7 +364,7 @@ func GetDriver(name string, config Options) (Driver, error) {
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
- return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
+ return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported)
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
@@ -305,16 +373,16 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
return initFunc(filepath.Join(home, name), options)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
- return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
+ return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported)
}
// Options is used to initialize a graphdriver
type Options struct {
Root string
RunRoot string
+ ImageStore string
+ DriverPriority []string
DriverOptions []string
- UIDMaps []idtools.IDMap
- GIDMaps []idtools.IDMap
ExperimentalEnabled bool
}
@@ -326,10 +394,19 @@ func New(name string, config Options) (Driver, error) {
}
// Guess for prior driver
- driversMap := scanPriorDrivers(config.Root)
- for _, name := range priority {
- if name == "vfs" {
- // don't use vfs even if there is state present.
+ driversMap := ScanPriorDrivers(config.Root)
+
+ // use the supplied priority list unless it is empty
+ prioList := config.DriverPriority
+ if len(prioList) == 0 {
+ prioList = Priority
+ }
+
+ for _, name := range prioList {
+ if name == "vfs" && len(config.DriverPriority) == 0 {
+ // don't use vfs even if there is state present and vfs
+ // has not been explicitly added to the override driver
+ // priority list
continue
}
if _, prior := driversMap[name]; prior {
@@ -362,7 +439,7 @@ func New(name string, config Options) (Driver, error) {
}
// Check for priority drivers first
- for _, name := range priority {
+ for _, name := range prioList {
driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
if isDriverNotSupported(err) {
@@ -384,25 +461,42 @@ func New(name string, config Options) (Driver, error) {
}
return driver, nil
}
- return nil, fmt.Errorf("No supported storage backend found")
+ return nil, fmt.Errorf("no supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
- cause := errors.Cause(err)
- return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+ return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS)
}
-// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+// ScanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
-func scanPriorDrivers(root string) map[string]bool {
+func ScanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
- if _, err := os.Stat(p); err == nil && driver != "vfs" {
+ if err := fileutils.Exists(p); err == nil {
driversMap[driver] = true
}
}
return driversMap
}
+
+// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging.
+// Typical usage:
+//
+// func …(…) (err error) {
+// …
+// defer driverPut(driver, id, &err)
+// }
+func driverPut(driver ProtoDriver, id string, mainErr *error) {
+ if err := driver.Put(id); err != nil {
+ err = fmt.Errorf("unmounting layer %s: %w", id, err)
+ if *mainErr == nil {
+ *mainErr = err
+ } else {
+ logrus.Error(err)
+ }
+ }
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go
new file mode 100644
index 000000000..b60883a9e
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go
@@ -0,0 +1,12 @@
+package graphdriver
+
+// Slice of drivers that should be used in order
+var Priority = []string{
+ "vfs",
+}
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ // Note it is OK to return FsMagicUnsupported on Darwin.
+ return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
index e1320ee07..a6072ab56 100644
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -2,15 +2,42 @@ package graphdriver
import (
"golang.org/x/sys/unix"
+
+ "github.com/containers/storage/pkg/mount"
+)
+
+const (
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"zfs",
+ "vfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicZfs: "zfs",
}
)
+// NewDefaultChecker returns a checker that reports whether the specified
+// path is mounted. On FreeBSD it queries mount.Mounted rather than
+// parsing /proc/mountinfo.
+func NewDefaultChecker() Checker {
+ return &defaultChecker{}
+}
+
+type defaultChecker struct{}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+ m, _ := mount.Mounted(path)
+ return m
+}
+
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index dddf8a8b4..d730dc38a 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package graphdriver
@@ -6,6 +6,7 @@ import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
+ "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -50,14 +51,48 @@ const (
FsMagicOverlay = FsMagic(0x794C7630)
// FsMagicFUSE filesystem id for FUSE
FsMagicFUSE = FsMagic(0x65735546)
+ // FsMagicAcfs filesystem id for Acfs
+ FsMagicAcfs = FsMagic(0x61636673)
+ // FsMagicAfs filesystem id for Afs
+ FsMagicAfs = FsMagic(0x5346414f)
+ // FsMagicCephFs filesystem id for Ceph
+ FsMagicCephFs = FsMagic(0x00C36400)
+ // FsMagicCIFS filesystem id for CIFS
+ FsMagicCIFS = FsMagic(0xFF534D42)
+ // FsMagicEROFS filesystem id for EROFS
+ FsMagicEROFS = FsMagic(0xE0F5E1E2)
+ // FsMagicFHGFS filesystem id for FHGFS
+ FsMagicFHGFSFs = FsMagic(0x19830326)
+ // FsMagicIBRIX filesystem id for IBRIX
+ FsMagicIBRIX = FsMagic(0x013111A8)
+ // FsMagicKAFS filesystem id for KAFS
+ FsMagicKAFS = FsMagic(0x6B414653)
+ // FsMagicLUSTRE filesystem id for LUSTRE
+ FsMagicLUSTRE = FsMagic(0x0BD00BD0)
+ // FsMagicNCP filesystem id for NCP
+ FsMagicNCP = FsMagic(0x564C)
+ // FsMagicNFSD filesystem id for NFSD
+ FsMagicNFSD = FsMagic(0x6E667364)
+ // FsMagicOCFS2 filesystem id for OCFS2
+ FsMagicOCFS2 = FsMagic(0x7461636F)
+ // FsMagicPANFS filesystem id for PANFS
+ FsMagicPANFS = FsMagic(0xAAD7AAEA)
+ // FsMagicPRLFS filesystem id for PRLFS
+ FsMagicPRLFS = FsMagic(0x7C7C6673)
+ // FsMagicSMB2 filesystem id for SMB2
+ FsMagicSMB2 = FsMagic(0xFE534D42)
+ // FsMagicSNFS filesystem id for SNFS
+ FsMagicSNFS = FsMagic(0xBEEFDEAD)
+ // FsMagicVBOXSF filesystem id for VBOXSF
+ FsMagicVBOXSF = FsMagic(0x786F4256)
+ // FsMagicVXFS filesystem id for VXFS
+ FsMagicVXFS = FsMagic(0xA501FCF5)
)
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"overlay",
- // We don't support devicemapper without configuration
- // "devicemapper",
"aufs",
"btrfs",
"zfs",
@@ -70,6 +105,7 @@ var (
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
+ FsMagicEROFS: "erofs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
@@ -92,9 +128,14 @@ var (
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf unix.Statfs_t
- if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+ path := filepath.Dir(rootpath)
+ if err := unix.Statfs(path, &buf); err != nil {
return 0, err
}
+
+ if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
+ logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
+ }
return FsMagic(buf.Type), nil
}
@@ -120,19 +161,39 @@ func NewDefaultChecker() Checker {
return &defaultChecker{}
}
-type defaultChecker struct {
-}
+type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
+// isMountPoint checks that the given path is a mount point
+func isMountPoint(mountPath string) (bool, error) {
+ // it is already the root
+ if mountPath == "/" {
+ return true, nil
+ }
+
+ var s1, s2 unix.Stat_t
+ if err := unix.Stat(mountPath, &s1); err != nil {
+ return true, err
+ }
+ if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
+ return true, err
+ }
+ return s1.Dev != s2.Dev, nil
+}
+
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
+
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
- return FsMagic(buf.Type) == fsType, nil
+ if FsMagic(buf.Type) != fsType {
+ return false, nil
+ }
+ return isMountPoint(mountPath)
}
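The new Mounted no longer trusts the filesystem magic alone: st_dev changes when a path crosses a mount boundary, so a directory whose device ID differs from its parent's is a mount point. A standalone sketch of that check (a hypothetical helper; note that a bind mount taken from the same filesystem keeps its st_dev and can evade this heuristic):

package main

import (
	"fmt"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// isMountPoint compares the device ID of a path with that of its parent.
func isMountPoint(path string) (bool, error) {
	if path == "/" {
		return true, nil
	}
	var self, parent unix.Stat_t
	if err := unix.Stat(path, &self); err != nil {
		return false, err
	}
	if err := unix.Stat(filepath.Dir(path), &parent); err != nil {
		return false, err
	}
	return self.Dev != parent.Dev, nil
}

func main() {
	for _, p := range []string{"/", "/proc", "/home"} {
		mounted, err := isMountPoint(p)
		fmt.Println(p, mounted, err)
	}
}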
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
index 174fa9670..47749c6ef 100644
--- a/vendor/github.com/containers/storage/drivers/driver_solaris.go
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -1,4 +1,4 @@
-// +build solaris,cgo
+//go:build solaris && cgo
package graphdriver
@@ -15,6 +15,7 @@ static inline struct statvfs *getstatfs(char *s) {
}
*/
import "C"
+
import (
"path/filepath"
"unsafe"
@@ -30,7 +31,7 @@ const (
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"zfs",
}
@@ -68,8 +69,7 @@ func NewDefaultChecker() Checker {
return &defaultChecker{}
}
-type defaultChecker struct {
-}
+type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
@@ -77,9 +77,8 @@ func (c *defaultChecker) IsMounted(path string) bool {
}
// Mounted checks if the given path is mounted as the fs type
-//Solaris supports only ZFS for now
+// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-
cs := C.CString(filepath.Dir(mountPath))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 4a875608b..dcf169b4d 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -1,13 +1,11 @@
-// +build !linux,!windows,!freebsd,!solaris
+//go:build !linux && !windows && !freebsd && !solaris && !darwin
package graphdriver
-var (
- // Slice of drivers that should be used in an order
- priority = []string{
- "unsupported",
- }
-)
+// Slice of drivers that should be used in order
+var Priority = []string{
+ "unsupported",
+}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go
index ffd30c295..54bd139a3 100644
--- a/vendor/github.com/containers/storage/drivers/driver_windows.go
+++ b/vendor/github.com/containers/storage/drivers/driver_windows.go
@@ -1,11 +1,9 @@
package graphdriver
-var (
- // Slice of drivers that should be used in order
- priority = []string{
- "windowsfilter",
- }
-)
+// Slice of drivers that should be used in order
+var Priority = []string{
+ "windowsfilter",
+}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 93743d177..e500585ff 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -2,21 +2,21 @@ package graphdriver
import (
"io"
+ "os"
+ "runtime"
"time"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
-var (
- // ApplyUncompressedLayer defines the unpack method used by the graph
- // driver.
- ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
-)
+// ApplyUncompressedLayer defines the unpack method used by the graph
+// driver.
+var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
@@ -31,10 +31,11 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
-// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
-// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
-// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+//
+// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
+// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
}
@@ -54,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
options := MountOpts{
MountLabel: mountLabel,
+ Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {
@@ -62,7 +64,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
defer func() {
if err != nil {
- driver.Put(id)
+ driverPut(driver, id, &err)
}
}()
@@ -77,7 +79,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
- driver.Put(id)
+ driverPut(driver, id, &err)
return err
}), nil
}
@@ -87,7 +89,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
if err != nil {
return nil, err
}
- defer driver.Put(parent)
+ defer driverPut(driver, parent, &err)
changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
if err != nil {
@@ -101,20 +103,20 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
- driver.Put(id)
+ driverPut(driver, id, &err)
// NaiveDiffDriver compares file metadata with parent layers. Parent layers
// are extracted from tars with full-second precision on modified time.
// We need this hack here to make sure calls within the same second receive
// the correct result.
- time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+ time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
return err
}), nil
}
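The time.Until change is behavior-preserving (it silences gosimple check S1024): both forms compute the interval until the next whole-second boundary after startTime. A quick illustration of the truncation arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	// The next whole-second boundary after start.
	boundary := start.Truncate(time.Second).Add(time.Second)
	// Sleeping until that boundary guarantees a later call sees a strictly
	// newer second, even with one-second timestamp precision in the tars.
	time.Sleep(time.Until(boundary))
	fmt.Println("slept", time.Since(start)) // at most ~1s
}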
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) {
driver := gdw.ProtoDriver
if idMappings == nil {
@@ -126,24 +128,22 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
options := MountOpts{
MountLabel: mountLabel,
+ Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {
return nil, err
}
- defer driver.Put(id)
+ defer driverPut(driver, id, &retErr)
parentFs := ""
if parent != "" {
- options := MountOpts{
- MountLabel: mountLabel,
- }
parentFs, err = driver.Get(parent, options)
if err != nil {
return nil, err
}
- defer driver.Put(parent)
+ defer driverPut(driver, parent, &retErr)
}
return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
@@ -167,11 +167,18 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
if err != nil {
return
}
- defer driver.Put(id)
+ defer driverPut(driver, id, &err)
+
+ defaultForceMask := os.FileMode(0o700)
+ var forceMask *os.FileMode // defaults to nil, i.e. no forced mask
+ if runtime.GOOS == "darwin" {
+ forceMask = &defaultForceMask
+ }
tarOptions := &archive.TarOptions{
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
IgnoreChownErrors: options.IgnoreChownErrors,
+ ForceMask: forceMask,
}
if options.Mappings != nil {
tarOptions.UIDMaps = options.Mappings.UIDs()
@@ -180,7 +187,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil {
- logrus.Errorf("Error while applying layer: %s", err)
+ logrus.Errorf("While applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
@@ -213,7 +220,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
if err != nil {
return
}
- defer driver.Put(id)
+ defer driverPut(driver, id, &err)
return archive.ChangesSize(layerFs, changes), nil
}
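driverPut itself is outside this hunk; judging from the call sites, it wraps ProtoDriver.Put and folds an unmount failure into the caller's named return error so that deferred cleanup errors are not silently dropped. A self-contained sketch of the pattern (the helper body is inferred from usage, not copied from the source):

package main

import (
	"errors"
	"fmt"
)

// protoDriver is trimmed to the one method this sketch needs.
type protoDriver interface {
	Put(id string) error
}

// driverPut propagates a Put failure through the caller's named return
// error when the caller was otherwise succeeding.
func driverPut(driver protoDriver, id string, mainErr *error) {
	if err := driver.Put(id); err != nil && *mainErr == nil {
		*mainErr = err
	}
}

type fakeDriver struct{}

func (fakeDriver) Put(string) error { return errors.New("unmount failed") }

func diff(d protoDriver) (err error) {
	defer driverPut(d, "layer", &err) // runs after err has been set
	return nil                        // the deferred Put error becomes the result
}

func main() {
	fmt.Println(diff(fakeDriver{})) // prints "unmount failed"
}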
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index 43fe00625..527701746 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -1,21 +1,22 @@
-// +build linux
+//go:build linux
package overlay
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idmap"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -25,7 +26,7 @@ import (
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d, mountOpts string) error {
- td, err := ioutil.TempDir(d, "opaque-bug-check")
+ td, err := os.MkdirTemp(d, "opaque-bug-check")
if err != nil {
return err
}
@@ -36,37 +37,42 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
- if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil {
return err
}
- if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil {
return err
}
- if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return err
}
// Mark l2/d as opaque
if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil {
- return errors.Wrap(err, "failed to set opaque flag on middle layer")
+ return fmt.Errorf("failed to set opaque flag on middle layer: %w", err)
}
- opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
+ mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s"
+ if unshare.IsRootless() {
+ mountFlags = mountFlags + ",userxattr"
+ }
+
+ opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- return errors.Wrap(err, "failed to mount overlay")
+ return fmt.Errorf("failed to mount overlay: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -75,14 +81,14 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
- if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
- return errors.Wrap(err, "failed to write to merged directory")
+ if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil {
+ return fmt.Errorf("failed to write to merged directory: %w", err)
}
// Check l3/d does not have opaque flag
xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque"))
if err != nil {
- return errors.Wrap(err, "failed to read opaque flag on upper layer")
+ return fmt.Errorf("failed to read opaque flag on upper layer: %w", err)
}
if string(xattrOpaque) == "y" {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
@@ -94,12 +100,12 @@ func doesSupportNativeDiff(d, mountOpts string) error {
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
- return errors.Wrap(err, "failed to rename dir in merged directory")
+ return fmt.Errorf("failed to rename dir in merged directory: %w", err)
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect"))
if err != nil {
- return errors.Wrap(err, "failed to read redirect flag on upper layer")
+ return fmt.Errorf("failed to read redirect flag on upper layer: %w", err)
}
if string(xattrRedirect) == "d1" {
@@ -114,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
- td, err := ioutil.TempDir(d, "metacopy-check")
+ td, err := os.MkdirTemp(d, "metacopy-check")
if err != nil {
return false, err
}
@@ -125,19 +131,19 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}()
// Make directories l1, l2, work, merged
- if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil {
return false, err
}
- if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil {
+ if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil {
return false, err
}
- if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
@@ -150,11 +156,11 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- if errors.Cause(err) == unix.EINVAL {
- logrus.Info("metacopy option not supported on this kernel", mountOpts)
+ if errors.Is(err, unix.EINVAL) {
+ logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
return false, nil
}
- return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts)
+ return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -163,8 +169,8 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}()
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
- if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
- return false, errors.Wrap(err, "error changing permissions on file for metacopy check")
+ if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil {
+ return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
if err != nil {
@@ -172,14 +178,14 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
logrus.Info("metacopy option not supported")
return false, nil
}
- return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer")
+ return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err)
}
return metacopy != nil, nil
}
// doesVolatile checks if the filesystem supports the "volatile" mount option
func doesVolatile(d string) (bool, error) {
- td, err := ioutil.TempDir(d, "volatile-check")
+ td, err := os.MkdirTemp(d, "volatile-check")
if err != nil {
return false, err
}
@@ -189,22 +195,25 @@ func doesVolatile(d string) (bool, error) {
}
}()
- if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil {
return false, err
}
- if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
+ if unshare.IsRootless() {
+ opts = fmt.Sprintf("%s,userxattr", opts)
+ }
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
- return false, errors.Wrapf(err, "failed to mount overlay for volatile check")
+ return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -213,3 +222,92 @@ func doesVolatile(d string) (bool, error) {
}()
return true, nil
}
+
+// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
+// an idmapped lower layer.
+func supportsIdmappedLowerLayers(home string) (bool, error) {
+ layerDir, err := os.MkdirTemp(home, "compat")
+ if err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = os.RemoveAll(layerDir)
+ }()
+
+ mergedDir := filepath.Join(layerDir, "merged")
+ lowerDir := filepath.Join(layerDir, "lower")
+ lowerMappedDir := filepath.Join(layerDir, "lower-mapped")
+ upperDir := filepath.Join(layerDir, "upper")
+ workDir := filepath.Join(layerDir, "work")
+
+ _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(workDir, 0o700, 0, 0)
+
+ mapping := []idtools.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 0,
+ Size: 1,
+ },
+ }
+ pid, cleanupFunc, err := idmap.CreateUsernsProcess(mapping, mapping)
+ if err != nil {
+ return false, err
+ }
+ defer cleanupFunc()
+
+ if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
+ return false, fmt.Errorf("create mapped mount: %w", err)
+ }
+ defer func() {
+ if err := unix.Unmount(lowerMappedDir, unix.MNT_DETACH); err != nil {
+ logrus.Warnf("Unmount %q: %v", lowerMappedDir, err)
+ }
+ }()
+
+ opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
+ flags := uintptr(0)
+ if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+ }()
+ return true, nil
+}
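pkg/idmap is not part of this diff. Under the assumption that CreateIDMappedMount wraps the open_tree(2)/mount_setattr(2)/move_mount(2) sequence that idmapped mounts require, the probe above boils down to something like this sketch (requires root and kernel 5.12+; the userns fd comes from a process carrying the desired mapping, which is what CreateUsernsProcess provides):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// idMapMount clones source as a detached mount, attaches the user
// namespace of pid as its ID mapping, and mounts the result at target.
func idMapMount(source, target string, pid int) error {
	userns, err := os.Open(fmt.Sprintf("/proc/%d/ns/user", pid))
	if err != nil {
		return err
	}
	defer userns.Close()

	treeFD, err := unix.OpenTree(unix.AT_FDCWD, source,
		unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC)
	if err != nil {
		return fmt.Errorf("open_tree %q: %w", source, err)
	}
	defer unix.Close(treeFD)

	attr := unix.MountAttr{
		Attr_set:  unix.MOUNT_ATTR_IDMAP,
		Userns_fd: uint64(userns.Fd()),
	}
	if err := unix.MountSetattr(treeFD, "", unix.AT_EMPTY_PATH, &attr); err != nil {
		return fmt.Errorf("mount_setattr: %w", err)
	}
	return unix.MoveMount(treeFD, "", unix.AT_FDCWD, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
}

func main() {
	// Illustrative paths; both directories must already exist.
	fmt.Println(idMapMount("/tmp/lower", "/tmp/lower-mapped", os.Getpid()))
}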
+
+// supportsDataOnlyLayers checks if the kernel supports mounting an overlay file system
+// that uses data-only layers.
+func supportsDataOnlyLayers(home string) (bool, error) {
+ layerDir, err := os.MkdirTemp(home, "compat")
+ if err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = os.RemoveAll(layerDir)
+ }()
+
+ mergedDir := filepath.Join(layerDir, "merged")
+ lowerDir := filepath.Join(layerDir, "lower")
+ lowerDirDataOnly := filepath.Join(layerDir, "lower-data")
+ upperDir := filepath.Join(layerDir, "upper")
+ workDir := filepath.Join(layerDir, "work")
+
+ _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0)
+ _ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(workDir, 0o700, 0, 0)
+
+ opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir)
+ flags := uintptr(0)
+ if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+ return false, err
+ }
+ _ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+
+ return true, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
new file mode 100644
index 000000000..7867d5006
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
@@ -0,0 +1,44 @@
+//go:build linux
+
+package overlay
+
+import (
+ "errors"
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func scanForMountProgramIndicators(home string) (detected bool, err error) {
+ err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error {
+ if detected {
+ return fs.SkipDir
+ }
+ if err != nil {
+ return err
+ }
+ basename := filepath.Base(path)
+ if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
+ detected = true
+ return fs.SkipDir
+ }
+ if d.IsDir() {
+ xattrs, err := system.Llistxattr(path)
+ if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+ return err
+ }
+ for _, xattr := range xattrs {
+ if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") {
+ detected = true
+ return fs.SkipDir
+ }
+ }
+ }
+ return nil
+ })
+ return detected, err
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
new file mode 100644
index 000000000..1fb3e62a4
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
@@ -0,0 +1,225 @@
+//go:build linux && cgo
+
+package overlay
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/containers/storage/pkg/chunked/dump"
+ "github.com/containers/storage/pkg/fsverity"
+ "github.com/containers/storage/pkg/loopback"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ composeFsHelperOnce sync.Once
+ composeFsHelperPath string
+ composeFsHelperErr error
+
+ // skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
+ // does not support it. Mounting directly via a file will be supported in kernel 6.12.
+ skipMountViaFile atomic.Bool
+)
+
+func getComposeFsHelper() (string, error) {
+ composeFsHelperOnce.Do(func() {
+ composeFsHelperPath, composeFsHelperErr = exec.LookPath("mkcomposefs")
+ })
+ return composeFsHelperPath, composeFsHelperErr
+}
+
+func getComposefsBlob(dataDir string) string {
+ return filepath.Join(dataDir, "composefs.blob")
+}
+
+func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
+ if err := os.MkdirAll(composefsDir, 0o700); err != nil {
+ return err
+ }
+
+ dumpReader, err := dump.GenerateDump(toc, verityDigests)
+ if err != nil {
+ return err
+ }
+
+ destFile := getComposefsBlob(composefsDir)
+ writerJson, err := getComposeFsHelper()
+ if err != nil {
+ return fmt.Errorf("failed to find mkcomposefs: %w", err)
+ }
+
+ outFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
+ if err != nil {
+ return err
+ }
+
+ roFile, err := os.Open(fmt.Sprintf("/proc/self/fd/%d", outFile.Fd()))
+ if err != nil {
+ outFile.Close()
+ return fmt.Errorf("failed to reopen %s as read-only: %w", destFile, err)
+ }
+
+ err = func() error {
+ // a scope to close outFile before setting fsverity on the read-only fd.
+ defer outFile.Close()
+
+ errBuf := &bytes.Buffer{}
+ cmd := exec.Command(writerJson, "--from-file", "-", "-")
+ cmd.Stderr = errBuf
+ cmd.Stdin = dumpReader
+ cmd.Stdout = outFile
+ if err := cmd.Run(); err != nil {
+ rErr := fmt.Errorf("failed to convert json to erofs: %w", err)
+ exitErr := &exec.ExitError{}
+ if errors.As(err, &exitErr) {
+ return fmt.Errorf("%w: %s", rErr, strings.TrimSpace(errBuf.String()))
+ }
+ return rErr
+ }
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+
+ if err := fsverity.EnableVerity("manifest file", int(roFile.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
+ logrus.Warningf("%s", err)
+ }
+
+ return nil
+}
+
+/*
+typedef enum {
+ LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0),
+} lcfs_erofs_flag_t;
+
+struct lcfs_erofs_header_s {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t flags;
+ uint32_t unused[5];
+} __attribute__((__packed__));
+*/
+
+// hasACL returns true if the erofs blob has ACLs enabled
+func hasACL(path string) (bool, error) {
+ const (
+ LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
+ versionNumberSize = 4
+ magicNumberSize = 4
+ flagsSize = 4
+ )
+
+ file, err := os.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+
+ // do not worry about checking the magic number; if the file is invalid
+ // we will fail to mount it anyway
+ buffer := make([]byte, versionNumberSize+magicNumberSize+flagsSize)
+ nread, err := file.Read(buffer)
+ if err != nil {
+ return false, err
+ }
+ if nread != len(buffer) {
+ return false, fmt.Errorf("failed to read flags from %q", path)
+ }
+ flags := buffer[versionNumberSize+magicNumberSize:]
+ return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
+}
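The 12 bytes read above could equally be decoded with encoding/binary against the lcfs_erofs_header_s layout quoted in the comment. A sketch assuming that little-endian packed layout (the magic bytes below are placeholders, not the real composefs magic):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// The first three fields of lcfs_erofs_header_s, per the C definition.
type lcfsErofsHeader struct {
	Magic   uint32
	Version uint32
	Flags   uint32
}

const lcfsErofsFlagsHasACL = 1 << 0

func main() {
	raw := []byte{
		0xef, 0xbe, 0xad, 0xde, // magic (placeholder bytes)
		0x01, 0x00, 0x00, 0x00, // version 1
		0x01, 0x00, 0x00, 0x00, // flags: HAS_ACL set
	}
	var h lcfsErofsHeader
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &h); err != nil {
		panic(err)
	}
	fmt.Printf("version=%d hasACL=%v\n", h.Version, h.Flags&lcfsErofsFlagsHasACL != 0)
}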
+
+func openBlobFile(blobFile string, hasACL, useLoopDevice bool) (int, error) {
+ if useLoopDevice {
+ loop, err := loopback.AttachLoopDeviceRO(blobFile)
+ if err != nil {
+ return -1, err
+ }
+ defer loop.Close()
+
+ blobFile = loop.Name()
+ }
+
+ fsfd, err := unix.Fsopen("erofs", 0)
+ if err != nil {
+ return -1, fmt.Errorf("failed to open erofs filesystem: %w", err)
+ }
+ defer unix.Close(fsfd)
+
+ if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
+ return -1, fmt.Errorf("failed to set source for erofs filesystem: %w", err)
+ }
+
+ if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
+ return -1, fmt.Errorf("failed to set erofs filesystem read-only: %w", err)
+ }
+
+ if !hasACL {
+ if err := unix.FsconfigSetFlag(fsfd, "noacl"); err != nil {
+ return -1, fmt.Errorf("failed to set noacl for erofs filesystem: %w", err)
+ }
+ }
+
+ if err := unix.FsconfigCreate(fsfd); err != nil {
+ buffer := make([]byte, 4096)
+ if n, _ := unix.Read(fsfd, buffer); n > 0 {
+ return -1, fmt.Errorf("failed to create erofs filesystem: %s: %w", strings.TrimSuffix(string(buffer[:n]), "\n"), err)
+ }
+ return -1, fmt.Errorf("failed to create erofs filesystem: %w", err)
+ }
+
+ mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
+ if err != nil {
+ buffer := make([]byte, 4096)
+ if n, _ := unix.Read(fsfd, buffer); n > 0 {
+ return -1, fmt.Errorf("failed to mount erofs filesystem: %s: %w", string(buffer[:n]), err)
+ }
+ return -1, fmt.Errorf("failed to mount erofs filesystem: %w", err)
+ }
+ return mfd, nil
+}
+
+func openComposefsMount(dataDir string) (int, error) {
+ blobFile := getComposefsBlob(dataDir)
+
+ hasACL, err := hasACL(blobFile)
+ if err != nil {
+ return -1, err
+ }
+
+ if !skipMountViaFile.Load() {
+ fd, err := openBlobFile(blobFile, hasACL, false)
+ if err == nil || !errors.Is(err, unix.ENOTBLK) {
+ return fd, err
+ }
+ logrus.Debugf("The current kernel doesn't support mounting EROFS directly from a file, fallback to a loopback device")
+ skipMountViaFile.Store(true)
+ }
+
+ return openBlobFile(blobFile, hasACL, true)
+}
+
+func mountComposefsBlob(dataDir, mountPoint string) error {
+ mfd, err := openComposefsMount(dataDir)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(mfd)
+
+ if err := unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+ return fmt.Errorf("failed to move mount to %q: %w", mountPoint, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
index 2a1e9d0cc..ca32c62b6 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
@@ -1,3 +1,5 @@
+//go:build linux
+
package overlay
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index 7c8fd50a3..b3ddac022 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package overlay
@@ -7,14 +7,16 @@ import (
"flag"
"fmt"
"os"
+ "path/filepath"
"runtime"
+ "strings"
"github.com/containers/storage/pkg/reexec"
"golang.org/x/sys/unix"
)
func init() {
- reexec.Register("storage-mountfrom", mountFromMain)
+ reexec.Register("storage-mountfrom", mountOverlayFromMain)
}
func fatal(err error) {
@@ -30,7 +32,7 @@ type mountOptions struct {
Flag uint32
}
-func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
+func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error {
options := &mountOptions{
Device: device,
Target: target,
@@ -42,7 +44,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd := reexec.Command("storage-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
- return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+ return fmt.Errorf("mountfrom error on pipe creation: %w", err)
}
output := bytes.NewBuffer(nil)
@@ -50,23 +52,23 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+ return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
}
- //write the options to the pipe for the untar exec to read
+ // write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+ return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
- return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
+ return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err)
}
return nil
}
-// mountFromMain is the entry-point for storage-mountfrom on re-exec.
-func mountFromMain() {
+// mountOverlayFromMain is the entry-point for storage-mountfrom on re-exec.
+func mountOverlayFromMain() {
runtime.LockOSThread()
flag.Parse()
@@ -76,11 +78,109 @@ func mountFromMain() {
fatal(err)
}
- if err := os.Chdir(flag.Arg(0)); err != nil {
+ // Mount the arguments passed from the specified directory. Some of the
+ // paths mentioned in the values we pass to the kernel are relative to
+ // the specified directory.
+ homedir := flag.Arg(0)
+ if err := os.Chdir(homedir); err != nil {
fatal(err)
}
- if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+ pageSize := unix.Getpagesize()
+ if len(options.Label) < pageSize {
+ if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+ fatal(err)
+ }
+ os.Exit(0)
+ }
+
+ // Those arguments still took up too much space. Open the diff
+ // directories and use their descriptor numbers as lowers, using
+ // /proc/self/fd as the current directory.
+
+ // Split out the various options, since we need to manipulate the
+ // paths, but we don't want to mess with other options.
+ var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
+ for _, arg := range strings.Split(options.Label, ",") {
+ key, val, _ := strings.Cut(arg, "=")
+ switch key {
+ case "upperdir":
+ upperk = "upperdir="
+ upperv = val
+ case "workdir":
+ workk = "workdir="
+ workv = val
+ case "lowerdir":
+ lowerk = "lowerdir="
+ lowerv = val
+ case "label":
+ labelk = "label="
+ labelv = val
+ default:
+ if others == "" {
+ others = arg
+ } else {
+ others = others + "," + arg
+ }
+ }
+ }
+
+ // Make sure upperdir, workdir, and the target are absolute paths.
+ if upperv != "" && !filepath.IsAbs(upperv) {
+ upperv = filepath.Join(homedir, upperv)
+ }
+ if workv != "" && !filepath.IsAbs(workv) {
+ workv = filepath.Join(homedir, workv)
+ }
+ if !filepath.IsAbs(options.Target) {
+ options.Target = filepath.Join(homedir, options.Target)
+ }
+
+ // Get a descriptor for each lower, and use that descriptor's name as
+ // the new value for the list of lowers, because it's shorter.
+ if lowerv != "" {
+ lowers := strings.Split(lowerv, ":")
+ var newLowers []string
+ dataOnly := false
+ for _, lowerPath := range lowers {
+ if lowerPath == "" {
+ dataOnly = true
+ continue
+ }
+ lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0)
+ if err != nil {
+ fatal(err)
+ }
+ var lower string
+ if dataOnly {
+ lower = fmt.Sprintf(":%d", lowerFd)
+ dataOnly = false
+ } else {
+ lower = fmt.Sprintf("%d", lowerFd)
+ }
+ newLowers = append(newLowers, lower)
+ }
+ lowerv = strings.Join(newLowers, ":")
+ }
+
+ // Reconstruct the Label field.
+ options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others
+ options.Label = strings.ReplaceAll(options.Label, ",,", ",")
+
+ // Okay, try this, if we managed to make the arguments fit.
+ var err error
+ if len(options.Label) < pageSize {
+ if err := os.Chdir("/proc/self/fd"); err != nil {
+ fatal(err)
+ }
+ err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label)
+ } else {
+ err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize)
+ }
+
+ // Clean up.
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label)
fatal(err)
}
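The fallback works because the kernel resolves relative lowerdir entries against the mounting process's working directory: after chdir("/proc/self/fd"), each opened lower directory can be named by its descriptor number, a few bytes instead of a full path. A toy version of the shortening (the data-only "::" markers handled above are omitted here):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// shortenLowers opens every lower directory and rewrites lowerdir as
// descriptor numbers, valid while chdir'd to /proc/self/fd.
func shortenLowers(lowerdir string) (string, error) {
	var fds []string
	for _, p := range strings.Split(lowerdir, ":") {
		fd, err := unix.Open(p, unix.O_RDONLY, 0)
		if err != nil {
			return "", err
		}
		fds = append(fds, fmt.Sprintf("%d", fd))
	}
	return strings.Join(fds, ":"), nil
}

func main() {
	short, err := shortenLowers("/tmp:/var") // stand-ins for long layer paths
	fmt.Println(short, err)                  // e.g. "3:4"
}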
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index d5d161bfd..caf896463 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1,17 +1,19 @@
-// +build linux
+//go:build linux
package overlay
import (
"bytes"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"os/exec"
"path"
"path/filepath"
+ "slices"
"strconv"
"strings"
"sync"
@@ -23,9 +25,11 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/fsutils"
+ "github.com/containers/storage/pkg/idmap"
"github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/locker"
+ "github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@@ -33,23 +37,19 @@ import (
units "github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
-var (
- // untar defines the untar method
- untar = chrootarchive.UntarUncompressed
-)
+// untar defines the untar method
+var untar = chrootarchive.UntarUncompressed
const (
- defaultPerms = os.FileMode(0555)
- selinuxLabelTest = "system_u:object_r:container_file_t:s0"
+ defaultPerms = os.FileMode(0o555)
+ selinuxLabelTest = "system_u:object_r:container_file_t:s0"
+ mountProgramFlagFile = ".has-mount-program"
)
// This backend uses the overlay union filesystem for containers
@@ -76,21 +76,29 @@ const (
// or root directory. Mounts are always done relative to root and
// referencing the symbolic links in order to ensure the number of
// lower directories can fit in a single page for making the mount
-// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// syscall. A hard upper limit of 500 lower layers is enforced to ensure
// that mounts do not fail due to length.
const (
- linkDir = "l"
- lowerFile = "lower"
- maxDepth = 128
+ linkDir = "l"
+ stagingDir = "staging"
+ lowerFile = "lower"
+ maxDepth = 500
+
+ stagingLockFile = "staging.lock"
+
+ tocArtifact = "toc"
+ fsVerityDigestsArtifact = "fs-verity-digests"
// idLength represents the number of random characters
// which can be used to create the unique link identifier
// for every layer. If this value is too long then the
// page size limit for the mount command may be exceeded.
// The idLength should be selected such that the following equation
- // is true (512 is a buffer for label metadata).
- // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+ // is true (512 is a buffer for label metadata, 128 is the
+ // number of lowers we want to be able to use without having
+ // to use bind mounts to get all the way to the kernel limit).
+ // ((idLength + len(linkDir) + 1) * 128) <= (pageSize - 512)
idLength = 26
)
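The revised bound is exactly tight for these values: with idLength = 26 and len(linkDir) = 1, (26 + 1 + 1) * 128 = 3584 = 4096 - 512, so 128 link-based lowers fill a 4 KiB page once the 512-byte label reserve is subtracted; depths beyond 128, up to the new maxDepth of 500, rely on the descriptor-shortening fallback in mount.go. A one-line check:

package main

import "fmt"

func main() {
	const idLength, linkDirLen, lowers, pageSize, reserve = 26, 1, 128, 4096, 512
	fmt.Println((idLength+linkDirLen+1)*lowers, pageSize-reserve) // 3584 3584
}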
@@ -103,6 +111,7 @@ type overlayOptions struct {
mountOptions string
ignoreChownErrors bool
forceMask *os.FileMode
+ useComposefs bool
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -110,20 +119,23 @@ type Driver struct {
name string
home string
runhome string
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
+ imageStore string
ctr *graphdriver.RefCounter
quotaCtl *quota.Control
options overlayOptions
naiveDiff graphdriver.DiffDriver
supportsDType bool
supportsVolatile *bool
+ supportsDataOnly *bool
usingMetacopy bool
- locker *locker.Locker
+ usingComposefs bool
+
+ stagingDirsLocks map[string]*lockfile.LockFile
+
+ supportsIDMappedMounts *bool
}
type additionalLayerStore struct {
-
// path is the directory where this store is available on the host.
path string
@@ -142,59 +154,71 @@ var (
)
func init() {
- graphdriver.Register("overlay", Init)
- graphdriver.Register("overlay2", Init)
+ graphdriver.MustRegister("overlay", Init)
+ graphdriver.MustRegister("overlay2", Init)
}
func hasMetacopyOption(opts []string) bool {
- for _, s := range opts {
- if s == "metacopy=on" {
- return true
- }
- }
- return false
-}
-
-func hasVolatileOption(opts []string) bool {
- for _, s := range opts {
- if s == "volatile" {
- return true
- }
- }
- return false
+ return slices.Contains(opts, "metacopy=on")
}
func getMountProgramFlagFile(path string) string {
- return filepath.Join(path, ".has-mount-program")
+ return filepath.Join(path, mountProgramFlagFile)
}
func checkSupportVolatile(home, runhome string) (bool, error) {
- feature := fmt.Sprintf("volatile")
+ const feature = "volatile"
volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature)
var usingVolatile bool
if err == nil {
if volatileCacheResult {
- logrus.Debugf("cached value indicated that volatile is being used")
+ logrus.Debugf("Cached value indicated that volatile is being used")
} else {
- logrus.Debugf("cached value indicated that volatile is not being used")
+ logrus.Debugf("Cached value indicated that volatile is not being used")
}
usingVolatile = volatileCacheResult
} else {
usingVolatile, err = doesVolatile(home)
if err == nil {
if usingVolatile {
- logrus.Debugf("overlay test mount indicated that volatile is being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is being used")
} else {
- logrus.Debugf("overlay test mount indicated that volatile is not being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
- return false, errors.Wrap(err, "error recording volatile-being-used status")
+ return false, fmt.Errorf("recording volatile-being-used status: %w", err)
}
+ } else {
+ usingVolatile = false
}
}
return usingVolatile, nil
}
+// checkAndRecordIDMappedSupport checks and records whether the kernel supports mounting overlay on top of an
+// idmapped lower layer.
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 {
+ return false, nil
+ }
+
+ feature := "idmapped-lower-dir"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported")
+ return true, nil
+ }
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported")
+ return false, errors.New(overlayCacheText)
+ }
+ supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
+ if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
+ return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2)
+ }
+ return supportsIDMappedMounts, err
+}
+
func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
var supportsDType bool
@@ -206,9 +230,9 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
- logrus.Debugf("cached value indicated that overlay is supported")
+ logrus.Debugf("Cached value indicated that overlay is supported")
} else {
- logrus.Debugf("cached value indicated that overlay is not supported")
+ logrus.Debugf("Cached value indicated that overlay is not supported")
}
supportsDType = overlayCacheResult
if !supportsDType {
@@ -223,14 +247,14 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
if ok && patherr.Err == syscall.ENOSPC {
return false, err
}
- err = errors.Wrap(err, "kernel does not support overlay fs")
+ err = fmt.Errorf("kernel does not support overlay fs: %w", err)
if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
- return false, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err)
+ return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2)
}
return false, err
}
if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
- return false, errors.Wrap(err, "error recording overlay support status")
+ return false, fmt.Errorf("recording overlay support status: %w", err)
}
}
return supportsDType, nil
@@ -248,6 +272,35 @@ func (d *Driver) getSupportsVolatile() (bool, error) {
return supportsVolatile, nil
}
+func (d *Driver) getSupportsDataOnly() (bool, error) {
+ if d.supportsDataOnly != nil {
+ return *d.supportsDataOnly, nil
+ }
+ supportsDataOnly, err := supportsDataOnlyLayersCached(d.home, d.runhome)
+ if err != nil {
+ return false, err
+ }
+ d.supportsDataOnly = &supportsDataOnly
+ return supportsDataOnly, nil
+}
+
+// isNetworkFileSystem checks if the specified file system is a network file system, which
+// native overlay cannot use as a backing store when running in a user namespace.
+func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
+ switch fsMagic {
+ // a bunch of network file systems...
+ case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
+ graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
+ graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+ graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
+ graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
+ graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
+ graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS:
+ return true
+ }
+ return false
+}
+
// Init returns a native diff driver for the overlay filesystem.
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
@@ -261,38 +314,67 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if err != nil {
return nil, err
}
- if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
- backingFs = fsName
+ fsName, ok := graphdriver.FsNames[fsMagic]
+ if !ok {
+ fsName = ""
}
+ backingFs = fsName
- if opts.mountProgram != "" {
- f, err := os.Create(getMountProgramFlagFile(home))
- if err == nil {
- f.Close()
- }
- } else {
- // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
- if opts.forceMask != nil {
- return nil, errors.New("'force_mask' is supported only with 'mount_program'")
- }
- switch fsMagic {
- case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
+ runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+
+ // Create the driver home dir
+ if err := os.MkdirAll(path.Join(home, linkDir), 0o755); err != nil {
+ return nil, err
+ }
+
+ if options.ImageStore != "" {
+ if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil {
+ return nil, err
}
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
- if err != nil {
+ if err := os.MkdirAll(runhome, 0o700); err != nil {
return nil, err
}
- // Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
- return nil, err
+ if opts.mountProgram == "" {
+ if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+ return nil, err
+ } else if !supported {
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.mountProgram = path
+ }
+ }
}
- runhome := filepath.Join(options.RunRoot, filepath.Base(home))
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
- return nil, err
+
+ if opts.mountProgram != "" {
+ if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
+ m := os.FileMode(0o700)
+ opts.forceMask = &m
+ logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
+ }
+
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil {
+ return nil, err
+ }
+ } else {
+ // check if they are running over btrfs, aufs, overlay, or ecryptfs
+ switch fsMagic {
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+ return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
+ }
+ if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
+ return nil, fmt.Errorf("a network file system with user namespaces is not supported. Please use a mount_program: %w", graphdriver.ErrIncompatibleFS)
+ }
+ }
+
+ if opts.useComposefs {
+ if unshare.IsRootless() {
+ return nil, fmt.Errorf("composefs is not supported in user namespaces")
+ }
+ if _, err := getComposeFsHelper(); err != nil {
+ return nil, fmt.Errorf("composefs helper program not found: %w", err)
+ }
}
var usingMetacopy bool
@@ -311,24 +393,24 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if metacopyCacheResult {
- logrus.Debugf("cached value indicated that metacopy is being used")
+ logrus.Debugf("Cached value indicated that metacopy is being used")
} else {
- logrus.Debugf("cached value indicated that metacopy is not being used")
+ logrus.Debugf("Cached value indicated that metacopy is not being used")
}
usingMetacopy = metacopyCacheResult
} else {
usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
if err == nil {
if usingMetacopy {
- logrus.Debugf("overlay test mount indicated that metacopy is being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is being used")
} else {
- logrus.Debugf("overlay test mount indicated that metacopy is not being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil {
- return nil, errors.Wrap(err, "error recording metacopy-being-used status")
+ return nil, fmt.Errorf("recording metacopy-being-used status: %w", err)
}
} else {
- logrus.Infof("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
+ logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err)
return nil, err
}
}
@@ -348,15 +430,15 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
d := &Driver{
name: "overlay",
home: home,
+ imageStore: options.ImageStore,
runhome: runhome,
- uidMaps: options.UIDMaps,
- gidMaps: options.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
- locker: locker.New(),
+ usingComposefs: opts.useComposefs,
options: *opts,
+ stagingDirsLocks: make(map[string]*lockfile.LockFile),
}
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d))
@@ -364,12 +446,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to enable project quota support over xfs.
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
- } else if opts.quota.Size > 0 {
- return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
+ return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err)
}
- } else if opts.quota.Size > 0 {
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -400,6 +482,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
return nil, err
}
o.quota.Size = uint64(size)
+ case "inodes":
+ logrus.Debugf("overlay: inodes=%s", val)
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ o.quota.Inodes = uint64(inodes)
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@@ -413,7 +502,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(store)
if err != nil {
- return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
+ return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
@@ -434,7 +523,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(lstore)
if err != nil {
- return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir")
+ return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore)
@@ -456,18 +545,27 @@ func parseOptions(options []string) (*overlayOptions, error) {
withReference: withReference,
})
}
+ case "use_composefs":
+ logrus.Debugf("overlay: use_composefs=%s", val)
+ o.useComposefs, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
if val != "" {
- _, err := os.Stat(val)
+ err := fileutils.Exists(val)
if err != nil {
- return nil, errors.Wrapf(err, "overlay: can't stat program %q", val)
+ return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err)
}
}
o.mountProgram = val
case "skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
case "ignore_chown_errors":
logrus.Debugf("overlay: ignore_chown_errors=%s", val)
o.ignoreChownErrors, err = strconv.ParseBool(val)
@@ -479,9 +577,9 @@ func parseOptions(options []string) (*overlayOptions, error) {
var mask int64
switch val {
case "shared":
- mask = 0755
+ mask = 0o755
case "private":
- mask = 0700
+ mask = 0o700
default:
mask, err = strconv.ParseInt(val, 8, 32)
if err != nil {
@@ -491,7 +589,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
m := os.FileMode(mask)
o.forceMask = &m
default:
- return nil, fmt.Errorf("overlay: Unknown option %s", key)
+ return nil, fmt.Errorf("overlay: unknown option %s", key)
}
}
return o, nil
@@ -505,11 +603,11 @@ func cachedFeatureSet(feature string, set bool) string {
}
func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
- content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
+ content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
if err == nil {
return true, string(content), nil
}
- content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
+ content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
if err == nil {
return false, string(content), nil
}
@@ -527,22 +625,39 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
return err
}
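cachedFeatureSet, unchanged above this hunk, evidently maps a feature name and a boolean onto a marker filename under runhome, so probe results survive across process starts; whichever marker file exists decides the answer without re-running the test mount. A sketch of the scheme under that assumption (the marker naming is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// marker is an illustrative stand-in for cachedFeatureSet.
func marker(runhome, feature string, supported bool) string {
	return filepath.Join(runhome, fmt.Sprintf("%s-%t", feature, supported))
}

// record persists a probe result; text can carry a failure reason.
func record(runhome, feature string, supported bool, text string) error {
	return os.WriteFile(marker(runhome, feature, supported), []byte(text), 0o600)
}

// check returns (supported, text, found).
func check(runhome, feature string) (bool, string, bool) {
	if content, err := os.ReadFile(marker(runhome, feature, true)); err == nil {
		return true, string(content), true
	}
	if content, err := os.ReadFile(marker(runhome, feature, false)); err == nil {
		return false, string(content), true
	}
	return false, "", false
}

func main() {
	runhome, _ := os.MkdirTemp("", "runhome")
	defer os.RemoveAll(runhome)
	_ = record(runhome, "metacopy", true, "")
	supported, _, found := check(runhome, "metacopy")
	fmt.Println(supported, found) // true true
}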
-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
- if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 || home == "" || runhome == "" {
return false, nil
}
- home := filepath.Join(graphroot, "overlay")
- runhome := filepath.Join(rundir, "overlay")
-
- if _, err := os.Stat(getMountProgramFlagFile(home)); err == nil {
- logrus.Debugf("overlay storage already configured with a mount-program")
+ var contents string
+ flagContent, err := os.ReadFile(getMountProgramFlagFile(home))
+ if err == nil {
+ contents = strings.TrimSpace(string(flagContent))
+ }
+ switch contents {
+ case "true":
+ logrus.Debugf("overlay: storage already configured with a mount-program")
return false, nil
+ default:
+ needsMountProgram, err := scanForMountProgramIndicators(home)
+ if err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+ if needsMountProgram {
+ return false, nil
+ }
+ // fall through to check if we find ourselves needing to use a
+ // mount program now
+ case "false":
}
for _, dir := range []string{home, runhome} {
- if _, err := os.Stat(dir); err != nil {
- _ = idtools.MkdirAllAs(dir, 0700, 0, 0)
+ if err := fileutils.Exists(dir); err != nil {
+ _ = idtools.MkdirAllAs(dir, 0o700, 0, 0)
}
}
@@ -556,16 +671,14 @@ func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
}
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
- // We can try to modprobe overlay first
-
- exec.Command("modprobe", "overlay").Run()
+ selinuxLabelTest := selinux.PrivContainerMountLabel()
logLevel := logrus.ErrorLevel
if unshare.IsRootless() {
logLevel = logrus.DebugLevel
}
- layerDir, err := ioutil.TempDir(home, "compat")
+ layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
@@ -585,8 +698,11 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
// Try a test mount in the specific location we're looking at using.
mergedDir := filepath.Join(layerDir, "merged")
+ mergedSubdir := filepath.Join(mergedDir, "subdir")
lower1Dir := filepath.Join(layerDir, "lower1")
lower2Dir := filepath.Join(layerDir, "lower2")
+ lower2Subdir := filepath.Join(lower2Dir, "subdir")
+ lower2SubdirFile := filepath.Join(lower2Subdir, "file")
upperDir := filepath.Join(layerDir, "upper")
workDir := filepath.Join(layerDir, "work")
defer func() {
@@ -597,11 +713,18 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
_ = os.RemoveAll(layerDir)
_ = os.Remove(home)
}()
- _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID)
+ _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID)
+ f, err := os.Create(lower2SubdirFile)
+ if err != nil {
+ logrus.Debugf("Unable to create test file: %v", err)
+ return supportsDType, fmt.Errorf("unable to create test file: %w", err)
+ }
+ f.Close()
flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir)
if selinux.GetEnabled() &&
selinux.SecurityCheckContext(selinuxLabelTest) == nil {
@@ -613,14 +736,22 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if unshare.IsRootless() {
flags = fmt.Sprintf("%s,userxattr", flags)
}
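+ // Verify that the backing filesystem can hold a 0:0 character device, which
+ // overlayfs uses as a whiteout marker for deleted files.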
+ if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil {
+ logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
+ return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err)
+ }
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.Debugf("overlay test mount with multiple lowers succeeded")
+ if err = os.RemoveAll(mergedSubdir); err != nil {
+ logrus.StandardLogger().Logf(logLevel, "overlay: removing an item from the merged directory failed: %v", err)
+ return supportsDType, fmt.Errorf("kernel returned %v when we tried to delete an item in the merged directory: %w", err, graphdriver.ErrNotSupported)
+ }
+ logrus.Debugf("overlay: test mount with multiple lowers succeeded")
return supportsDType, nil
}
- logrus.Debugf("overlay test mount with multiple lowers failed %v", err)
+ logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err)
}
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
if selinux.GetEnabled() {
@@ -629,20 +760,24 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.StandardLogger().Logf(logLevel, "overlay test mount with multiple lowers failed, but succeeded with a single lower")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
+ logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
+ return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
}
- logrus.Debugf("overlay test mount with a single lower failed %v", err)
+ logrus.Debugf("overlay: test mount with a single lower failed: %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
- return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
+ return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+ return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.: %w", graphdriver.ErrNotSupported)
}
func (d *Driver) useNaiveDiff() bool {
+ if d.usingComposefs {
+ return true
+ }
+
useNaiveDiffLock.Do(func() {
if d.options.mountProgram != "" {
useNaiveDiffOnly = true
@@ -652,9 +787,9 @@ func (d *Driver) useNaiveDiff() bool {
nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature)
if err == nil {
if nativeDiffCacheResult {
- logrus.Debugf("cached value indicated that native-diff is usable")
+ logrus.Debugf("Cached value indicated that native-diff is usable")
} else {
- logrus.Debugf("cached value indicated that native-diff is not being used")
+ logrus.Debugf("Cached value indicated that native-diff is not being used")
logrus.Info(nativeDiffCacheText)
}
useNaiveDiffOnly = !nativeDiffCacheResult
@@ -665,7 +800,9 @@ func (d *Driver) useNaiveDiff() bool {
logrus.Info(nativeDiffCacheText)
useNaiveDiffOnly = true
}
- cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)
+ if err := cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText); err != nil {
+ logrus.Warnf("Recording overlay native-diff support status: %v", err)
+ }
})
return useNaiveDiffOnly
}
@@ -677,25 +814,31 @@ func (d *Driver) String() string {
// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
+ supportsVolatile, err := d.getSupportsVolatile()
+ if err != nil {
+ supportsVolatile = false
+ }
return [][2]string{
{"Backing Filesystem", backingFs},
{"Supports d_type", strconv.FormatBool(d.supportsDType)},
{"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())},
{"Using metacopy", strconv.FormatBool(d.usingMetacopy)},
+ {"Supports shifting", strconv.FormatBool(d.SupportsShifting())},
+ {"Supports volatile", strconv.FormatBool(supportsVolatile)},
}
}
// Metadata returns metadata about the overlay driver such as the
// LowerDir, UpperDir, WorkDir, and MergedDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
- dir := d.dir(id)
- if _, err := os.Stat(dir); err != nil {
+ dir, _, inAdditionalStore := d.dir2(id, false)
+ if err := fileutils.Exists(dir); err != nil {
return nil, err
}
metadata := map[string]string{
"WorkDir": path.Join(dir, "work"),
- "MergedDir": path.Join(dir, "merged"),
+ "MergedDir": d.getMergedDir(id, dir, inAdditionalStore),
"UpperDir": path.Join(dir, "diff"),
}
@@ -710,20 +853,54 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
return metadata, nil
}
-// Cleanup any state created by overlay which should be cleaned when daemon
-// is being shutdown. For now, we just have to unmount the bind mounted
-// we had created.
+// Cleanup any state created by overlay which should be cleaned when
+// the storage is being shut down. The only state created by the driver
+// is the bind mount on the home directory.
func (d *Driver) Cleanup() error {
- _ = os.RemoveAll(d.getStagingDir())
+ anyPresent := d.pruneStagingDirectories()
+ if anyPresent {
+ return nil
+ }
return mount.Unmount(d.home)
}
+// pruneStagingDirectories cleans up any staging directory that was leaked.
+// It returns whether any staging directory is still present.
+func (d *Driver) pruneStagingDirectories() bool {
+ for _, lock := range d.stagingDirsLocks {
+ lock.Unlock()
+ }
+ d.stagingDirsLocks = make(map[string]*lockfile.LockFile)
+
+ anyPresent := false
+
+ stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
+ dirs, err := os.ReadDir(stagingDirBase)
+ if err == nil {
+ for _, dir := range dirs {
+ stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name())
+ lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
+ if err != nil {
+ anyPresent = true
+ continue
+ }
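+ // A failed TryLock means another process is still using this staging
+ // directory, so leave it in place.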
+ if err := lock.TryLock(); err != nil {
+ anyPresent = true
+ continue
+ }
+ _ = os.RemoveAll(stagingDirToRemove)
+ lock.Unlock()
+ }
+ }
+ return anyPresent
+}
+
// LookupAdditionalLayer looks up additional layer store by the specified
-// digest and ref and returns an object representing that layer.
+// TOC digest and ref and returns an object representing that layer.
// This API is experimental and can be changed without bumping the major version number.
// TODO: to remove the comment once it's no longer experimental.
-func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
- l, err := d.getAdditionalLayerPath(dgst, ref)
+func (d *Driver) LookupAdditionalLayer(tocDigest digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
+ l, err := d.getAdditionalLayerPath(tocDigest, ref)
if err != nil {
return nil, err
}
@@ -777,6 +954,10 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
}
}
+ if d.options.forceMask != nil && d.options.mountProgram == "" {
+ return fmt.Errorf("overlay: force_mask option for writeable layers is only supported with a mount_program")
+ }
+
if _, ok := opts.StorageOpt["size"]; !ok {
if opts.StorageOpt == nil {
opts.StorageOpt = map[string]string{}
@@ -784,7 +965,14 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
}
- return d.create(id, parent, opts)
+ if _, ok := opts.StorageOpt["inodes"]; !ok {
+ if opts.StorageOpt == nil {
+ opts.StorageOpt = map[string]string{}
+ }
+ opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
+ }
+
+ return d.create(id, parent, opts, false)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
@@ -794,53 +982,88 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+
+ if _, ok := opts.StorageOpt["inodes"]; ok {
+ return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
+ }
}
- return d.create(id, parent, opts)
+ return d.create(id, parent, opts, true)
}
-func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
- dir := d.dir(id)
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
+ dir, homedir, _ := d.dir2(id, readOnly)
+
+ disableQuota := readOnly
- uidMaps := d.uidMaps
- gidMaps := d.gidMaps
+ var uidMaps []idtools.IDMap
+ var gidMaps []idtools.IDMap
if opts != nil && opts.IDMappings != nil {
uidMaps = opts.IDMappings.UIDs()
gidMaps = opts.IDMappings.GIDs()
}
+ // Make the link directory if it does not exist
+ if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
+ return err
+ }
+
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
- // Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
- return err
+
+ idPair := idtools.IDPair{
+ UID: rootUID,
+ GID: rootGID,
}
- if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
+
+ if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
+
+ st := idtools.Stat{IDs: idPair, Mode: defaultPerms}
+
if parent != "" {
- st, err := system.Stat(d.dir(parent))
- if err != nil {
- return err
+ parentBase := d.dir(parent)
+ parentDiff := filepath.Join(parentBase, "diff")
+ if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil {
+ st = xSt
+ } else {
+ systemSt, err := system.Stat(parentDiff)
+ if err != nil {
+ return err
+ }
+ st.IDs.UID = int(systemSt.UID())
+ st.IDs.GID = int(systemSt.GID())
+ st.Mode = os.FileMode(systemSt.Mode())
+ }
+ }
+
+ if err := fileutils.Lexists(dir); err == nil {
+ logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
+ // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
+ // so that we can’t end up with two symlinks in linkDir pointing to the same layer.
+ if err := d.Remove(id); err != nil {
+ return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err)
}
- rootUID = int(st.UID())
- rootGID = int(st.GID())
}
- if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
+
+ if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
- os.RemoveAll(dir)
+ if err2 := os.RemoveAll(dir); err2 != nil {
+ logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
+ }
}
}()
- if d.quotaCtl != nil {
+ if d.quotaCtl != nil && !disableQuota {
quota := quota.Quota{}
if opts != nil && len(opts.StorageOpt) > 0 {
driver := &Driver{}
@@ -850,7 +1073,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if driver.options.quota.Size > 0 {
quota.Size = driver.options.quota.Size
}
-
+ if driver.options.quota.Inodes > 0 {
+ quota.Inodes = driver.options.quota.Inodes
+ }
}
// Set container disk quota limit
// If it is set to 0, we will track the disk usage, but not enforce a limit
@@ -859,42 +1084,45 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
}
- perms := defaultPerms
+ forcedSt := st
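+ // With forceMask configured, on-disk entries are created with the forced
+ // IDs/mode, while the intended ownership and permissions are preserved in a
+ // containers override xattr on the diff directory below.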
if d.options.forceMask != nil {
- perms = *d.options.forceMask
- }
- if parent != "" {
- st, err := system.Stat(filepath.Join(d.dir(parent), "diff"))
- if err != nil {
- return err
- }
- perms = os.FileMode(st.Mode())
+ forcedSt.IDs = idPair
+ forcedSt.Mode = *d.options.forceMask
}
- if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
+ diff := path.Join(dir, "diff")
+ if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
+ if d.options.forceMask != nil {
+ if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
+ return err
+ }
+ }
+
lid := generateID(idLength)
- if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
+
+ linkBase := path.Join("..", id, "diff")
+ if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
return err
}
// Write link id to link file
- if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil {
return err
}
- if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
- if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
if parent == "" {
- return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID)
+ return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID)
}
lower, err := d.getLower(parent)
@@ -902,7 +1130,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
if lower != "" {
- if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+ if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil {
return err
}
}
@@ -922,8 +1150,14 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
return err
}
driver.options.quota.Size = uint64(size)
+ case "inodes":
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ driver.options.quota.Inodes = uint64(inodes)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s", key)
}
}
@@ -934,28 +1168,28 @@ func (d *Driver) getLower(parent string) (string, error) {
parentDir := d.dir(parent)
// Ensure parent exists
- if _, err := os.Lstat(parentDir); err != nil {
+ if err := fileutils.Lexists(parentDir); err != nil {
return "", err
}
// Read Parent link file
- parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err := os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
+ return "", fmt.Errorf("recreating the links: %w", err)
}
- parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err = os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
}
lowers := []string{path.Join(linkDir, string(parentLink))}
- parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+ parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
@@ -964,22 +1198,49 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
- newpath := path.Join(d.home, id)
- if _, err := os.Stat(newpath); err != nil {
- for _, p := range d.AdditionalImageStores() {
+ p, _, _ := d.dir2(id, false)
+ return p
+}
+
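+// getAllImageStores returns the configured image store (if any) followed by
+// all additional image stores.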
+func (d *Driver) getAllImageStores() []string {
+ additionalImageStores := d.AdditionalImageStores()
+ if d.imageStore != "" {
+ additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
+ }
+ return additionalImageStores
+}
+
+// homeDirForImageStore returns the home directory to use when an image store is configured
+func (d *Driver) homeDirForImageStore() string {
+ if d.imageStore != "" {
+ return path.Join(d.imageStore, d.name)
+ }
+ // If no image store is configured, use the driver's home directory.
+ return d.home
+}
+
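+// dir2 returns the path of the layer directory for id, the home directory it
+// was resolved against, and whether the layer lives in an additional
+// (read-only) image store.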
+func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
+ homedir := d.home
+ if useImageStore {
+ homedir = d.homeDirForImageStore()
+ }
+ newpath := path.Join(homedir, id)
+ if err := fileutils.Exists(newpath); err != nil {
+ for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
- _, err = os.Stat(l)
+ err = fileutils.Exists(l)
if err == nil {
- return l
+ return l, homedir, true
}
}
}
- return newpath
+ return newpath, homedir, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
- lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+ lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lower := d.dir(s)
@@ -990,7 +1251,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
if os.IsNotExist(err) {
logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower)
if err := d.recreateSymlinks(); err != nil {
- return nil, fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return nil, fmt.Errorf("recreating the missing symlinks: %w", err)
}
// let's call Readlink on lower again now that we have recreated the missing symlinks
lp, err = os.Readlink(lower)
@@ -1010,12 +1271,6 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
}
func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string {
- if uidMaps == nil {
- uidMaps = d.uidMaps
- }
- if gidMaps == nil {
- gidMaps = d.gidMaps
- }
if uidMaps != nil {
var uids, gids bytes.Buffer
if len(uidMaps) == 1 && uidMaps[0].Size == 1 {
@@ -1047,11 +1302,8 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
-
dir := d.dir(id)
- lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+ lid, err := os.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
@@ -1063,6 +1315,12 @@ func (d *Driver) Remove(id string) error {
if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
+ if d.quotaCtl != nil {
+ d.quotaCtl.ClearQuota(dir)
+ if d.imageStore != "" {
+ d.quotaCtl.ClearQuota(d.imageStore)
+ }
+ }
return nil
}
@@ -1070,23 +1328,22 @@ func (d *Driver) Remove(id string) error {
// under each layer has a symlink created for it under the linkDir. If the symlink does not
// exist, it creates them
func (d *Driver) recreateSymlinks() error {
+ // We have at most 3 corrective actions per layer, so 10 iterations is plenty.
+ const maxIterations = 10
+
// List all the directories under the home directory
- dirs, err := ioutil.ReadDir(d.home)
+ dirs, err := os.ReadDir(d.home)
if err != nil {
- return fmt.Errorf("error reading driver home directory %q: %v", d.home, err)
+ return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
- linksDir := filepath.Join(d.home, "l")
// This makes the link directory if it doesn't exist
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return err
- }
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
return err
}
// Keep looping as long as we take some corrective action in each iteration
var errs *multierror.Error
madeProgress := true
+ iterations := 0
for madeProgress {
errs = nil
madeProgress = false
@@ -1094,19 +1351,19 @@ func (d *Driver) recreateSymlinks() error {
// the layer's "link" file that points to the layer's "diff" directory.
for _, dir := range dirs {
// Skip over the linkDir and anything that is not a directory
- if dir.Name() == linkDir || !dir.Mode().IsDir() {
+ if dir.Name() == linkDir || !dir.IsDir() {
continue
}
// Read the "link" file under each layer to get the name of the symlink
- data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
+ data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading name of symlink for %q", dir))
+ errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
// Check if the symlink exists, and if it doesn't, create it again with the
// name we got from the "link" file
- _, err = os.Lstat(linkPath)
+ err = fileutils.Lexists(linkPath)
if err != nil && os.IsNotExist(err) {
if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil {
errs = multierror.Append(errs, err)
@@ -1114,30 +1371,38 @@ func (d *Driver) recreateSymlinks() error {
}
madeProgress = true
} else if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error trying to stat %q", linkPath))
+ errs = multierror.Append(errs, err)
continue
}
}
+
+ // linkDirFullPath is the full path to the linkDir
+ linkDirFullPath := filepath.Join(d.home, "l")
// Now check if we somehow lost a "link" file, by making sure
// that each symlink we have corresponds to one.
- links, err := ioutil.ReadDir(linksDir)
+ links, err := os.ReadDir(linkDirFullPath)
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading links directory %q", linksDir))
+ errs = multierror.Append(errs, err)
continue
}
// Go through all of the symlinks in the "l" directory
for _, link := range links {
// Read the symlink's target, which should be "../$layer/diff"
- target, err := os.Readlink(filepath.Join(linksDir, link.Name()))
+ target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading target of link %q", link))
+ errs = multierror.Append(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
- errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target))
+ errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
- os.Remove(filepath.Join(linksDir, link.Name()))
+ if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
+ if !os.IsNotExist(err) {
+ errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
+ } // else don’t report any error, but also don’t set madeProgress.
+ continue
+ }
madeProgress = true
continue
}
@@ -1145,15 +1410,22 @@ func (d *Driver) recreateSymlinks() error {
// it has the basename of our symlink in it.
targetID := targetComponents[1]
linkFile := filepath.Join(d.dir(targetID), "link")
- data, err := ioutil.ReadFile(linkFile)
+ data, err := os.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
- if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error correcting link for layer %q", targetID))
+ // NOTE: If two or more links point to the same target, we will update linkFile
+ // with every value of link.Name(), and set madeProgress = true every time.
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
madeProgress = true
}
}
+ iterations++
+ if iterations >= maxIterations {
+ errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
+ break
+ }
}
if errs != nil {
return errs.ErrorOrNil()
@@ -1162,45 +1434,99 @@ func (d *Driver) recreateSymlinks() error {
}
// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return d.get(id, false, options)
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- dir := d.dir(id)
- if _, err := os.Stat(dir); err != nil {
+ dir, _, inAdditionalStore := d.dir2(id, false)
+ if err := fileutils.Exists(dir); err != nil {
+ return "", err
+ }
+ if _, err := redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), true); err != nil {
+ return "", err
+ }
+
+ // user namespace requires this to move a directory from lower to upper.
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
+ if err != nil {
+ return "", err
+ }
+
+ mergedDir := d.getMergedDir(id, dir, inAdditionalStore)
+ // Attempt to create the merged dir if it doesn't exist, but don't chown an already existing directory (it might be in an additional store)
+ if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil && !os.IsExist(err) {
return "", err
}
- readWrite := true
+
+ if count := d.ctr.Increment(mergedDir); count > 1 {
+ return mergedDir, nil
+ }
+ defer func() {
+ if retErr != nil {
+ if c := d.ctr.Decrement(mergedDir); c <= 0 {
+ if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+ // Ignore EINVAL, it means the directory is not a mount point and it can happen
+ // if the current function fails before the mount point is created.
+ if !errors.Is(mntErr, unix.EINVAL) {
+ logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
+ }
+ }
+ }
+ }
+ }()
+
+ readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
disableShifting = true
}
+ logLevel := logrus.WarnLevel
+ if unshare.IsRootless() {
+ logLevel = logrus.DebugLevel
+ }
optsList := options.Options
+
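+ // Native overlay can use idmapped mounts for UID/GID shifting only when no
+ // mount program (e.g. fuse-overlayfs) is configured.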
+ needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == ""
+
if len(optsList) == 0 {
- optsList = strings.Split(d.options.mountOptions, ",")
+ if d.options.mountOptions != "" {
+ optsList = strings.Split(d.options.mountOptions, ",")
+ }
} else {
// If metacopy=on is present in d.options.mountOptions it must be present in the mount
// options otherwise the kernel refuses to follow the metacopy xattr.
if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) {
if d.usingMetacopy {
+ logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally")
optsList = append(optsList, "metacopy=on")
- } else {
- logrus.Warnf("ignoring metacopy option from storage.conf, not supported with booted kernel")
}
}
}
- for _, o := range optsList {
- if o == "ro" {
- readWrite = false
- break
+ if !d.usingMetacopy {
+ if hasMetacopyOption(optsList) {
+ if d.options.mountProgram == "" {
+ release := ""
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err == nil {
+ release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
+ }
+ logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel %s", release)
+ } else {
+ logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
+ }
}
+ optsList = slices.DeleteFunc(optsList, func(opt string) bool {
+ return opt == "metacopy=on"
+ })
+ }
+
+ if slices.Contains(optsList, "ro") {
+ readWrite = false
}
- lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+ lowers, err := os.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
}
@@ -1209,27 +1535,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", errors.New("max depth exceeded")
}
- // absLowers is the list of lowers as absolute paths, which works well with additional stores.
+ // absLowers is the list of lowers as absolute paths.
absLowers := []string{}
- // relLowers is the list of lowers as paths relative to the driver's home directory.
- relLowers := []string{}
- // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
- // lists that we're building. "diff" itself is the upper, so it won't be in the lists.
- link, err := ioutil.ReadFile(path.Join(dir, "link"))
- if err != nil {
- if !os.IsNotExist(err) {
- return "", err
- }
- logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link"))
- if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
- }
- link, err = ioutil.ReadFile(path.Join(dir, "link"))
- if err != nil {
- return "", err
- }
- }
diffN := 1
perms := defaultPerms
if d.options.forceMask != nil {
@@ -1243,25 +1551,85 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
- relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
- st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
- if err == nil && !permsKnown {
- perms = os.FileMode(st.Mode())
- permsKnown = true
+ err = fileutils.Exists(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ }
+
+ idmappedMountProcessPid := -1
+ if needsIDMapping {
+ pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps)
+ if err != nil {
+ return "", err
}
+ idmappedMountProcessPid = int(pid)
+ defer cleanupFunc()
}
- // For each lower, resolve its path, and append it and any additional diffN
- // directories to the lowers list.
- for _, l := range splitLowers {
- if l == "" {
- continue
+ skipIDMappingLayers := make(map[string]string)
+
+ composefsMounts := []string{}
+ defer func() {
+ for _, m := range composefsMounts {
+ defer func(m string) {
+ if err := unix.Unmount(m, unix.MNT_DETACH); err != nil {
+ logrus.Warnf("Unmount %q: %v", m, err)
+ }
+ }(m)
}
- lower := ""
+ }()
+
+ composeFsLayers := []string{}
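+ // maybeAddComposefsMount mounts the composefs blob for lowerID, if one
+ // exists, and returns the mount point to be used as a lower directory.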
+ maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
+ composefsBlob := d.getComposefsData(lowerID)
+ if err := fileutils.Exists(composefsBlob); err != nil {
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+ return "", err
+ }
+ logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID)
+
+ if readWrite && i == 0 {
+ return "", fmt.Errorf("cannot mount a composefs layer as writeable")
+ }
+
+ dest := d.getStorePrivateDirectory(id, dir, fmt.Sprintf("composefs-layers/%d", i), inAdditionalStore)
+ if err := os.MkdirAll(dest, 0o700); err != nil {
+ return "", err
+ }
+
+ if err := mountComposefsBlob(composefsBlob, dest); err != nil {
+ return "", err
+ }
+ composefsMounts = append(composefsMounts, dest)
+ composeFsPath, err := d.getDiffPath(lowerID)
+ if err != nil {
+ return "", err
+ }
+ composeFsLayers = append(composeFsLayers, composeFsPath)
+ skipIDMappingLayers[composeFsPath] = composeFsPath
+ return dest, nil
+ }
+
+ diffDir := path.Join(dir, "diff")
+
+ if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
+ return "", err
+ } else if dest != "" {
+ diffDir = dest
+ }
+
+ // For each lower, resolve its path, and append it and any additional diffN
+ // directories to the lowers list.
+ for i, l := range splitLowers {
+ if l == "" {
+ continue
+ }
+
+ lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
- for _, p := range d.AdditionalImageStores() {
+ for _, p := range d.getAllImageStores() {
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
@@ -1278,11 +1646,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if lower == "" && os.IsNotExist(err) {
logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath)
if err := d.recreateSymlinks(); err != nil {
- return "", fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return "", fmt.Errorf("recreating the missing symlinks: %w", err)
}
lower = newpath
} else if lower == "" {
- return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
+ return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err)
}
} else {
if !permsKnown {
@@ -1291,75 +1659,152 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
lower = newpath
}
+
+ linkContent, err := os.Readlink(lower)
+ if err != nil {
+ return "", err
+ }
+ lowerID := filepath.Base(filepath.Dir(linkContent))
+ composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite)
+ if err != nil {
+ return "", err
+ }
+ if composefsMount != "" {
+ if needsIDMapping {
+ if err := idmap.CreateIDMappedMount(composefsMount, composefsMount, idmappedMountProcessPid); err != nil {
+ return "", fmt.Errorf("create mapped mount for %q: %w", composefsMount, err)
+ }
+ skipIDMappingLayers[composefsMount] = composefsMount
+ // overlay takes a reference on the mount, so it is safe to unmount
+ // the mapped idmounts as soon as the final overlay file system is mounted.
+ defer func() {
+ if err := unix.Unmount(composefsMount, unix.MNT_DETACH); err != nil {
+ logrus.Warnf("Unmount %q: %v", composefsMount, err)
+ }
+ }()
+ }
+ absLowers = append(absLowers, composefsMount)
+ continue
+ }
+
absLowers = append(absLowers, lower)
- relLowers = append(relLowers, l)
diffN = 1
- _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
+ err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
for err == nil {
absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
- relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN)))
diffN++
- _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
+ err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
}
}
+ if len(composeFsLayers) > 0 {
+ optsList = append(optsList, "metacopy=on", "redirect_dir=on")
+ }
+
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
- relLowers = append(relLowers, path.Join(id, "empty"))
- }
- // user namespace requires this to move a directory from lower to upper.
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return "", err
}
- diffDir := path.Join(dir, "diff")
+
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
- return "", err
+ if !inAdditionalStore {
+ return "", err
+ }
+ // if it is in an additional store, do not fail if the directory already exists
+ if err2 := fileutils.Exists(diffDir); err2 != nil {
+ return "", err
+ }
}
- mergedDir := path.Join(dir, "merged")
- // Create the driver merged dir
- if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
- return "", err
+ workdir := path.Join(dir, "work")
+
+ if d.options.mountProgram == "" && unshare.IsRootless() {
+ optsList = append(optsList, "userxattr")
}
- if count := d.ctr.Increment(mergedDir); count > 1 {
- return mergedDir, nil
+
+ if options.Volatile && !slices.Contains(optsList, "volatile") {
+ supported, err := d.getSupportsVolatile()
+ if err != nil {
+ return "", err
+ }
+ // If "volatile" is not supported by the file system, just ignore the request
+ if supported {
+ optsList = append(optsList, "volatile")
+ }
}
- defer func() {
- if retErr != nil {
- if c := d.ctr.Decrement(mergedDir); c <= 0 {
- if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
- logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
- }
- }
+
+ if needsIDMapping {
+ var newAbsDir []string
+ idMappedMounts := make(map[string]string)
+
+ mappedRoot := filepath.Join(d.home, id, "mapped")
+ if err := os.MkdirAll(mappedRoot, 0o700); err != nil {
+ return "", err
}
- }()
- workdir := path.Join(dir, "work")
+ // rewrite the lower dirs to their idmapped mount.
+ c := 0
+ for _, absLower := range absLowers {
+ mappedMountSrc := getMappedMountRoot(absLower)
- var opts string
- if readWrite {
- opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
- } else {
- opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
- }
- if len(optsList) > 0 {
- opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts)
- }
+ if _, ok := skipIDMappingLayers[absLower]; ok {
+ newAbsDir = append(newAbsDir, absLower)
+ continue
+ }
- if d.options.mountProgram == "" && unshare.IsRootless() {
- opts = fmt.Sprintf("%s,userxattr", opts)
+ root, found := idMappedMounts[mappedMountSrc]
+ if !found {
+ root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
+ c++
+ if err := idmap.CreateIDMappedMount(mappedMountSrc, root, idmappedMountProcessPid); err != nil {
+ return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
+ }
+ idMappedMounts[mappedMountSrc] = root
+
+ // overlay takes a reference on the mount, so it is safe to unmount
+ // the mapped idmounts as soon as the final overlay file system is mounted.
+ defer func() {
+ if err := unix.Unmount(root, unix.MNT_DETACH); err != nil {
+ logrus.Warnf("Unmount %q: %v", root, err)
+ }
+ }()
+ }
+
+ // relative path to the layer through the id mapped mount
+ rel, err := filepath.Rel(mappedMountSrc, absLower)
+ if err != nil {
+ return "", err
+ }
+
+ newAbsDir = append(newAbsDir, filepath.Join(root, rel))
+ }
+ absLowers = newAbsDir
}
- // If "volatile" is not supported by the file system, just ignore the request
- if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) {
- supported, err := d.getSupportsVolatile()
+ lowerDirs := strings.Join(absLowers, ":")
+ if len(composeFsLayers) > 0 {
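+ // The kernel separates data-only lower layers with "::"; fall back to the
+ // plain ":" separator when data-only layers are not supported.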
+ sep := "::"
+ supportsDataOnly, err := d.getSupportsDataOnly()
if err != nil {
return "", err
}
- if supported {
- opts = fmt.Sprintf("%s,volatile", opts)
+ if !supportsDataOnly {
+ sep = ":"
}
+ composeFsLayersLowerDirs := strings.Join(composeFsLayers, sep)
+ lowerDirs = lowerDirs + sep + composeFsLayersLowerDirs
+ }
+ // absLowers no longer reflects the full set of lower directories now that
+ // composeFsLayers has been folded into lowerDirs, so prevent any further use.
+ absLowers = nil //nolint:ineffassign
+
+ var opts string
+ if readWrite {
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir)
+ } else {
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
+ }
+ if len(optsList) > 0 {
+ opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
@@ -1368,16 +1813,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
pageSize := unix.Getpagesize()
- // Use relative paths and mountFrom when the mount data has exceeded
- // the page size. The mount syscall fails if the mount data cannot
- // fit within a page and relative links make the mount data much
- // smaller at the expense of requiring a fork exec to chroot.
if d.options.mountProgram != "" {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
if !disableShifting {
label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps)
}
+ // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well.
+ if d.options.forceMask != nil {
+ label = label + ",xattr_permissions=2"
+ }
+
mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
mountProgram.Dir = d.home
var b bytes.Buffer
@@ -1388,69 +1834,116 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if output == "" {
output = ""
}
- return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+ return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err)
}
return nil
}
- } else if len(mountData) > pageSize {
- workdir = path.Join(id, "work")
- //FIXME: We need to figure out to get this to work with additional stores
+ } else if len(mountData) >= pageSize {
+ // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
+ // the mount data cannot fit within a page and relative links make the mount data much
+ // smaller at the expense of requiring a fork exec to chdir().
if readWrite {
diffDir := path.Join(id, "diff")
- opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
+ workDir := path.Join(id, "work")
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir)
} else {
- opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
}
- mountData = label.FormatMountLabel(opts, options.MountLabel)
- if len(mountData) > pageSize {
- return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize)
+ if len(optsList) > 0 {
+ opts = strings.Join(append([]string{opts}, optsList...), ",")
}
+ mountData = label.FormatMountLabel(opts, options.MountLabel)
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
- return mountFrom(d.home, source, target, mType, flags, label)
+ return mountOverlayFrom(d.home, source, target, mType, flags, label)
+ }
+ if !inAdditionalStore {
+ mountTarget = path.Join(id, "merged")
}
- mountTarget = path.Join(id, "merged")
}
// overlay has a check in place to prevent mounting the same file system twice
- // if volatile was already specified.
- err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile"))
- if err != nil && !os.IsNotExist(err) {
+ // if volatile was already specified. Yes, the kernel repeats the "work" component.
+ err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile"))
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
return "", err
}
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
- return "", fmt.Errorf("error creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
+ return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err)
}
return mergedDir, nil
}
+// getStorePrivateDirectory returns a directory path for storing data that requires exclusive access.
+// If 'inAdditionalStore' is true, the path will be under the rundir, otherwise it will be placed in
+// the primary store.
+func (d *Driver) getStorePrivateDirectory(id, layerDir, subdir string, inAdditionalStore bool) string {
+ if inAdditionalStore {
+ return path.Join(d.runhome, id, subdir)
+ }
+ return path.Join(layerDir, subdir)
+}
+
+// getMergedDir returns the directory path that should be used as the mount point for the overlayfs.
+func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
+ // Ordinarily, .Get() (layer mounting) callers are supposed to guarantee exclusion.
+ //
+ // But additional stores are initialized with RO locks and don’t support a write
+ // lock operation at all; and naiveDiff operations cause mounts/unmounts, so they might
+ // happen on code paths where we might only be holding a RO lock for the additional store.
+ // To prevent races with other processes mounting or unmounting the layer,
+ // use a private directory under the main store rundir, not the "merged" directory inside the
+ // original layer store holding the layer data.
+ //
+ // To support this, contrary to the _general_ locking rules for .Diff / .Changes (which allow a RO lock),
+ // the top-level Store implementation uses an exclusive lock for the primary layer store;
+ // and since the rundir cannot be shared for different stores, it is safe to assume the
+ // current process has exclusive access to it.
+ //
+ // TODO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+ // (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize,
+ // but DiffSize probably needs to remain for computing sizes of container’s RW layers.)
+ return d.getStorePrivateDirectory(id, dir, "merged", inAdditionalStore)
+}
+
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- dir := d.dir(id)
- if _, err := os.Stat(dir); err != nil {
+ dir, _, inAdditionalStore := d.dir2(id, false)
+ if err := fileutils.Exists(dir); err != nil {
return err
}
- mountpoint := path.Join(dir, "merged")
+ mountpoint := d.getMergedDir(id, dir, inAdditionalStore)
+
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
- if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+ if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
unmounted := false
+ mappedRoot := filepath.Join(d.home, id, "mapped")
+ // It should not happen, but cleanup any mapped mount if it was leaked.
+ if err := fileutils.Exists(mappedRoot); err == nil {
+ mounts, err := os.ReadDir(mappedRoot)
+ if err == nil {
+ // Go through all of the mapped mounts.
+ for _, m := range mounts {
+ _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH)
+ }
+ }
+ }
+
if d.options.mountProgram != "" {
// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mountpoint).Run()
- if err != nil && errors.Cause(err) != exec.ErrNotFound {
+ if err != nil && !errors.Is(err, exec.ErrNotFound) {
logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
}
if err == nil {
@@ -1461,7 +1954,7 @@ func (d *Driver) Put(id string) error {
// If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all
// pending changes are propagated to the file system
if !unmounted {
- fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
+ fd, err := unix.Open(mountpoint, unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err == nil {
if err := unix.Syncfs(fd); err != nil {
logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
@@ -1474,22 +1967,75 @@ func (d *Driver) Put(id string) error {
if !unmounted {
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+ if !errors.Is(err, unix.EINVAL) {
+ return fmt.Errorf("unmounting %q: %w", mountpoint, err)
+ }
}
}
- if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
- logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
- }
+ if inAdditionalStore {
+ // check the base name for extra safety
+ if strings.HasPrefix(mountpoint, d.runhome) && filepath.Base(mountpoint) == "merged" {
+ err := os.RemoveAll(filepath.Dir(mountpoint))
+ if err != nil {
+ logrus.Warningf("Failed to remove mountpoint %s overlay: %s: %v", id, mountpoint, err)
+ }
+ }
+ } else {
+ uid, gid := int(0), int(0)
+ fi, err := os.Stat(mountpoint)
+ if err != nil {
+ return err
+ }
+ if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+ uid, gid = int(stat.Uid), int(stat.Gid)
+ }
+ tmpMountpoint := path.Join(dir, "merged.1")
+ if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
+ return err
+ }
+ // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
+ // its atomic semantic. In this way the "merged" directory is never removed.
+ if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
+ logrus.Debugf("Failed to replace mountpoint %s overlay: %s: %v", id, mountpoint, err)
+ return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
+ }
+ }
return nil
}
// Exists checks whether the layer directory for the given id exists.
func (d *Driver) Exists(id string) bool {
- _, err := os.Stat(d.dir(id))
+ err := fileutils.Exists(d.dir(id))
return err == nil
}
+// ListLayers lists the IDs of the layers in the home directory (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+ entries, err := os.ReadDir(d.home)
+ if err != nil {
+ return nil, err
+ }
+ layers := make([]string, 0)
+
+ for _, entry := range entries {
+ id := entry.Name()
+ switch id {
+ case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
+ // expected, but not a layer. skip it
+ continue
+ default:
+ // Does it look like a datadir directory?
+ if !entry.IsDir() {
+ continue
+ }
+ layers = append(layers, id)
+ }
+ }
+ return layers, nil
+}
+
// isParent returns if the passed in parent is the direct parent of the passed in layer
func (d *Driver) isParent(id, parent string) bool {
lowers, err := d.getLowerDirs(id)
@@ -1522,90 +2068,245 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
return whiteoutFormat
}
-type fileGetNilCloser struct {
- storage.FileGetter
+type overlayFileGetter struct {
+ diffDirs []string
+ composefsMounts map[string]*os.File // map from diff dir to the directory with the composefs blob mounted
}
-func (f fileGetNilCloser) Close() error {
- return nil
+func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+ buf := make([]byte, unix.PathMax)
+ for _, d := range g.diffDirs {
+ if f, found := g.composefsMounts[d]; found {
+ // there is no *at equivalent for getxattr, but it can be emulated by opening the file under /proc/self/fd/$FD/$PATH
+ len, err := unix.Getxattr(fmt.Sprintf("/proc/self/fd/%d/%s", int(f.Fd()), path), "trusted.overlay.redirect", buf)
+ if err != nil {
+ if errors.Is(err, unix.ENODATA) {
+ continue
+ }
+ return nil, &fs.PathError{Op: "getxattr", Path: path, Err: err}
+ }
+
+ // the xattr value is the path to the file in the composefs layer diff directory
+ return os.Open(filepath.Join(d, string(buf[:len])))
+ }
+
+ f, err := os.Open(filepath.Join(d, path))
+ if err == nil {
+ return f, nil
+ }
+ }
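+ // Nothing matched; open the path in the first diff dir so the caller gets a
+ // consistent *PathError.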
+ if len(g.diffDirs) > 0 {
+ return os.Open(filepath.Join(g.diffDirs[0], path))
+ }
+ return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
}
-func (d *Driver) getStagingDir() string {
- return filepath.Join(d.home, "staging")
+func (g *overlayFileGetter) Close() error {
+ var errs *multierror.Error
+ for _, f := range g.composefsMounts {
+ if err := f.Close(); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ if err := unix.Rmdir(f.Name()); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ }
+ return errs.ErrorOrNil()
+}
+
+// newStagingDir creates a new staging directory and returns the path to it.
+func (d *Driver) newStagingDir() (string, error) {
+ stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
+ err := os.MkdirAll(stagingDirBase, 0o700)
+ if err != nil && !os.IsExist(err) {
+ return "", err
+ }
+ return os.MkdirTemp(stagingDirBase, "")
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
-func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+// contains files for the layer differences, either for this layer, or one of our
+// lowers if we're just a template directory. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) {
p, err := d.getDiffPath(id)
if err != nil {
return nil, err
}
- return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+ paths, err := d.getLowerDiffPaths(id)
+ if err != nil {
+ return nil, err
+ }
+
+ // map from diff dir to the directory with the composefs blob mounted
+ composefsMounts := make(map[string]*os.File)
+ defer func() {
+ if Err != nil {
+ for _, f := range composefsMounts {
+ f.Close()
+ if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) {
+ logrus.Warnf("Failed to remove %s: %v", f.Name(), err)
+ }
+ }
+ }
+ }()
+ diffDirs := append([]string{p}, paths...)
+ for _, diffDir := range diffDirs {
+ // diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory
+ id := path.Base(path.Dir(diffDir))
+ composefsData := d.getComposefsData(id)
+ if fileutils.Exists(composefsData) != nil {
+ // not a composefs layer, ignore it
+ continue
+ }
+ fd, err := openComposefsMount(composefsData)
+ if err != nil {
+ return nil, err
+ }
+ composefsMounts[diffDir] = os.NewFile(uintptr(fd), composefsData)
+ }
+ return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil
}
// CleanupStagingDirectory cleanups the staging directory.
func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
- return os.RemoveAll(stagingDirectory)
+ parentStagingDir := filepath.Dir(stagingDirectory)
+
+ if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
+ delete(d.stagingDirsLocks, parentStagingDir)
+ lock.Unlock()
+ }
+
+ return os.RemoveAll(parentStagingDir)
+}
+
+func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
+ feature := "dataonly-layers"
+ overlayCacheResult, _, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
+ return true, nil
+ }
+ logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
+ return false, nil
+ }
+ supportsDataOnly, err := supportsDataOnlyLayers(home)
+ if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
+ return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
+ }
+ return supportsDataOnly, err
}
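
`supportsDataOnlyLayersCached` follows the same probe-once-then-cache pattern as the driver's other feature checks. A simplified sketch of that pattern, assuming one plain file per feature under `runhome`; the real `cachedFeatureCheck`/`cachedFeatureRecord` helpers use their own format, and the real code above records the result even when the probe itself errors.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// cachedCheck returns the cached result for feature if one exists,
// otherwise runs probe and records its outcome for next time.
func cachedCheck(runhome, feature string, probe func() (bool, error)) (bool, error) {
	cacheFile := filepath.Join(runhome, feature+".check")
	if b, err := os.ReadFile(cacheFile); err == nil {
		// A cached result from an earlier probe wins.
		return strings.TrimSpace(string(b)) == "true", nil
	}
	ok, err := probe()
	if err != nil {
		return false, err
	}
	if err := os.WriteFile(cacheFile, []byte(strconv.FormatBool(ok)), 0o600); err != nil {
		return false, fmt.Errorf("recording %s support status: %w", feature, err)
	}
	return ok, nil
}

func main() {
	ok, err := cachedCheck(os.TempDir(), "dataonly-layers", func() (bool, error) {
		return true, nil // stand-in for the real kernel probe
	})
	fmt.Println(ok, err)
}
```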
-// ApplyDiff applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
+// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
+func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
var idMappings *idtools.IDMappings
+ var forceMask *os.FileMode
+
if options != nil {
idMappings = options.Mappings
+ forceMask = options.ForceMask
}
+ if d.options.forceMask != nil {
+ forceMask = d.options.forceMask
+ }
+
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
- applyDir := ""
-
- if id == "" {
- err := os.MkdirAll(d.getStagingDir(), 0700)
- if err != nil && !os.IsExist(err) {
- return graphdriver.DriverWithDifferOutput{}, err
- }
- applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
- if err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
+ layerDir, err := d.newStagingDir()
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ perms := defaultPerms
+ if forceMask != nil {
+ perms = *forceMask
+ }
+ applyDir := filepath.Join(layerDir, "dir")
+ if err := os.Mkdir(applyDir, perms); err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
- } else {
- var err error
- applyDir, err = d.getDiffPath(id)
- if err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
+ lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
}
+ defer func() {
+ if errRet != nil {
+ delete(d.stagingDirsLocks, layerDir)
+ lock.Unlock()
+ }
+ }()
+ d.stagingDirsLocks[layerDir] = lock
+ lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
+ differOptions := graphdriver.DifferOptions{
+ Format: graphdriver.DifferOutputFormatDir,
+ }
+ if d.usingComposefs {
+ differOptions.Format = graphdriver.DifferOutputFormatFlat
+ differOptions.UseFsVerity = graphdriver.DifferFsVerityIfAvailable
+ }
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
- InUserNS: rsystem.RunningInUserNS(),
- })
+ InUserNS: unshare.IsRootless(),
+ ForceMask: forceMask,
+ }, &differOptions)
+
out.Target = applyDir
+
return out, err
}
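
Note the deliberate asymmetry in `ApplyDiffWithDiffer`: the deferred closure inspects the named return value and releases the staging lock only when the function fails, so a successful return leaves the lock held for the later `ApplyDiffFromStagingDirectory` or `CleanupStagingDirectory` call. A compact sketch of that named-return cleanup idiom, with `lockDir`/`unlockDir` as stand-ins for the lockfile bookkeeping:

```go
package main

import (
	"errors"
	"fmt"
)

var held = map[string]bool{}

func lockDir(dir string)   { held[dir] = true }
func unlockDir(dir string) { delete(held, dir) }

func stage(dir string, fail bool) (errRet error) {
	lockDir(dir)
	defer func() {
		if errRet != nil {
			unlockDir(dir) // roll back only on error
		}
	}()
	if fail {
		return errors.New("apply failed")
	}
	return nil // on success the lock stays held for the follow-up call
}

func main() {
	fmt.Println(stage("/tmp/staging1", false), held) // lock retained
	fmt.Println(stage("/tmp/staging2", true), held)  // lock released
}
```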
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
-func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
- if filepath.Dir(stagingDirectory) != d.getStagingDir() {
- return fmt.Errorf("%q is not a staging directory", stagingDirectory)
- }
+func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
+ stagingDirectory := diffOutput.Target
+ parentStagingDir := filepath.Dir(stagingDirectory)
+
+ defer func() {
+ if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
+ delete(d.stagingDirsLocks, parentStagingDir)
+ lock.Unlock()
+ }
+ }()
- diff, err := d.getDiffPath(id)
+ diffPath, err := d.getDiffPath(id)
if err != nil {
return err
}
- if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
+
+ // If the current layer doesn't set the mode for the parent, override it with the parent layer's mode.
+ if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" {
+ parentDiffPath, err := d.getDiffPath(parent)
+ if err != nil {
+ return err
+ }
+ parentSt, err := os.Stat(parentDiffPath)
+ if err != nil {
+ return err
+ }
+ if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil {
+ return err
+ }
+ }
+
+ if d.usingComposefs {
+ toc := diffOutput.Artifacts[tocArtifact]
+ verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string)
+ if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
+ return err
+ }
+ }
+ if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
return err
}
- return os.Rename(stagingDirectory, diff)
+
+ return os.Rename(stagingDirectory, diffPath)
}
// DifferTarget gets the location where files are stored for the layer.
@@ -1615,7 +2316,6 @@ func (d *Driver) DifferTarget(id string) (string, error) {
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
-
if !d.isParent(id, parent) {
if d.options.ignoreChownErrors {
options.IgnoreChownErrors = d.options.ignoreChownErrors
@@ -1644,7 +2344,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
IgnoreChownErrors: d.options.ignoreChownErrors,
ForceMask: d.options.forceMask,
WhiteoutFormat: d.getWhiteoutFormat(),
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}); err != nil {
return 0, err
}
@@ -1652,9 +2352,14 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
return directory.Size(applyDir)
}
+func (d *Driver) getComposefsData(id string) string {
+ dir := d.dir(id)
+ return path.Join(dir, "composefs-data")
+}
+
func (d *Driver) getDiffPath(id string) (string, error) {
dir := d.dir(id)
- return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
+ return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false)
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -1663,7 +2368,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
return nil, err
}
for i, l := range layers {
- layers[i], err = redirectDiffIfAdditionalLayer(l)
+ layers[i], err = redirectDiffIfAdditionalLayer(l, false)
if err != nil {
return nil, err
}
@@ -1675,7 +2380,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
- if d.options.mountProgram == "" && (d.useNaiveDiff() || !d.isParent(id, parent)) {
+ if !d.isParent(id, parent) {
return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
}
@@ -1726,14 +2431,18 @@ func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent strin
// layers.
diffPath, err := d.getDiffPath(id)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to get diff path: %w", err)
}
layers, err := d.getLowerDiffPaths(id)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to get lower diff path: %w", err)
}
- return archive.OverlayChanges(layers, diffPath)
+ c, err := archive.OverlayChanges(layers, diffPath)
+ if err != nil {
+ return nil, fmt.Errorf("computing changes: %w", err)
+ }
+ return c, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
@@ -1768,7 +2477,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost)
if err != nil {
if err2 := d.Put(id); err2 != nil {
- logrus.Errorf("%v; error unmounting %v: %v", err, id, err2)
+ logrus.Errorf("%v; unmounting %v: %v", err, id, err2)
}
return err
}
@@ -1789,7 +2498,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
}
for err == nil {
i++
- _, err = os.Stat(nameWithSuffix(diffDir, i))
+ err = fileutils.Exists(nameWithSuffix(diffDir, i))
}
for i > 0 {
@@ -1816,12 +2525,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
return nil
}
+// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with
+// overlay lower layers.
+func (d *Driver) supportsIDmappedMounts() bool {
+ if d.supportsIDMappedMounts != nil {
+ return *d.supportsIDMappedMounts
+ }
+
+ supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome)
+ d.supportsIDMappedMounts = &supportsIDMappedMounts
+ if err == nil {
+ return supportsIDMappedMounts
+ }
+ logrus.Debugf("Check for idmapped mounts support %v", err)
+ return false
+}
+
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
- if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" {
+ if os.Getenv("_CONTAINERS_OVERLAY_DISABLE_IDMAP") == "yes" {
+ return false
+ }
+ if d.options.mountProgram != "" {
return true
}
- return d.options.mountProgram != ""
+ return d.supportsIDmappedMounts()
}
// dumbJoin is more or less a dumber version of filepath.Join, but one which
@@ -1841,37 +2569,42 @@ func nameWithSuffix(name string, number int) string {
return fmt.Sprintf("%s%d", name, number)
}
-func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) {
+func validateOneAdditionalLayerPath(target string) error {
+ for _, p := range []string{
+ filepath.Join(target, "diff"),
+ filepath.Join(target, "info"),
+ filepath.Join(target, "blob"),
+ } {
+ if err := fileutils.Exists(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) {
refElem := base64.StdEncoding.EncodeToString([]byte(ref))
for _, ls := range d.options.layerStores {
ref := ""
if ls.withReference {
ref = refElem
}
- target := path.Join(ls.path, ref, dgst.String())
- // Check if all necessary files exist
- for _, p := range []string{
- filepath.Join(target, "diff"),
- filepath.Join(target, "info"),
- filepath.Join(target, "blob"),
- } {
- if _, err := os.Stat(p); err != nil {
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "failed to stat additional layer %q: %v", p, err)
- }
+ target := path.Join(ls.path, ref, tocDigest.String())
+ err := validateOneAdditionalLayerPath(target)
+ if err == nil {
+ return target, nil
}
- return target, nil
+ logrus.Debugf("additional Layer Store %v failed to stat additional layer: %v", ls, err)
}
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "additional layer (%q, %q) not found", dgst, ref)
+ return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown)
}
func (d *Driver) releaseAdditionalLayerByID(id string) {
if al, err := d.getAdditionalLayerPathByID(id); err == nil {
notifyReleaseAdditionalLayer(al)
} else if !os.IsNotExist(err) {
- logrus.Warnf("unexpected error on reading Additional Layer Store pointer %v", err)
+ logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err)
}
}
@@ -1889,7 +2622,7 @@ func (al *additionalLayer) Info() (io.ReadCloser, error) {
return os.Open(filepath.Join(al.path, "info"))
}
-// Blob returns a reader of the raw contents of this leyer.
+// Blob returns a reader of the raw contents of this layer.
func (al *additionalLayer) Blob() (io.ReadCloser, error) {
return os.Open(filepath.Join(al.path, "blob"))
}
@@ -1909,7 +2642,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
// tell the additional layer store that we use this layer.
// mark this layer as "additional layer"
- if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil {
return err
}
notifyUseAdditionalLayer(al.path)
@@ -1917,7 +2650,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
- al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
+ al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer"))
if err != nil {
return "", err
}
@@ -1952,10 +2685,10 @@ func notifyUseAdditionalLayer(al string) {
} else if err == nil {
f.Close()
if err := os.Remove(useFile); err != nil {
- logrus.Warnf("failed to remove use file")
+ logrus.Warnf("Failed to remove use file")
}
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
}
// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified
@@ -1972,21 +2705,38 @@ func notifyReleaseAdditionalLayer(al string) {
if os.IsNotExist(err) {
return
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
}
// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
// returns the redirected path. If the passed diff is not the one in Additional Layer
// Store, it returns the original path without changes.
-func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
+func redirectDiffIfAdditionalLayer(diffPath string, checkExistence bool) (string, error) {
if ld, err := os.Readlink(diffPath); err == nil {
// diff is the link to Additional Layer Store
if !path.IsAbs(ld) {
return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld)
}
+ if checkExistence {
+ if err := fileutils.Exists(ld); err != nil {
+ return "", fmt.Errorf("failed to access to the linked additional layer: %w", err)
+ }
+ }
diffPath = ld
} else if err.(*os.PathError).Err != syscall.EINVAL {
return "", err
}
return diffPath, nil
}
+
+// getMappedMountRoot is a heuristic that calculates the parent directory where
+// the idmapped mount should be applied.
+// It is useful to minimize the number of idmapped mounts and at the same time use
+// a common path as long as possible to reduce the length of the mount data argument.
+func getMappedMountRoot(path string) string {
+ dirName := filepath.Dir(path)
+ if filepath.Base(dirName) == linkDir {
+ return filepath.Dir(dirName)
+ }
+ return dirName
+}
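
A worked example of the heuristic above, assuming `linkDir` is this driver's `l` link directory: mounts under the shared link directory collapse to its parent, so many layers can share a single idmapped mount of the overlay root, while ordinary layer paths map to their own parent directory.

```go
package main

import (
	"fmt"
	"path/filepath"
)

const linkDir = "l"

// Local copy of the heuristic for demonstration.
func getMappedMountRoot(path string) string {
	dirName := filepath.Dir(path)
	if filepath.Base(dirName) == linkDir {
		return filepath.Dir(dirName)
	}
	return dirName
}

func main() {
	fmt.Println(getMappedMountRoot("/var/lib/containers/storage/overlay/l/ABCDEF"))
	// /var/lib/containers/storage/overlay
	fmt.Println(getMappedMountRoot("/var/lib/containers/storage/overlay/0123/diff"))
	// /var/lib/containers/storage/overlay/0123
}
```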
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go
deleted file mode 100644
index 0b70a5d92..000000000
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build linux,cgo
-
-package overlay
-
-import (
- "path"
-
- "github.com/containers/storage/pkg/directory"
-)
-
-// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
-// For Overlay, it attempts to check the XFS quota for size, and falls back to
-// finding the size of the "diff" directory.
-func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
- usage := &directory.DiskUsage{}
- if d.quotaCtl != nil {
- err := d.quotaCtl.GetDiskUsage(d.dir(id), usage)
- return usage, err
- }
- return directory.Usage(path.Join(d.dir(id), "diff"))
-}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go
new file mode 100644
index 000000000..39ca489f5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go
@@ -0,0 +1,21 @@
+//go:build linux && cgo && !exclude_disk_quota
+
+package overlay
+
+import (
+ "path"
+
+ "github.com/containers/storage/pkg/directory"
+)
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For Overlay, it attempts to check the XFS quota for size, and falls back to
+// finding the size of the "diff" directory.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+ usage := &directory.DiskUsage{}
+ if d.quotaCtl != nil {
+ err := d.quotaCtl.GetDiskUsage(d.dir(id), usage)
+ return usage, err
+ }
+ return directory.Usage(path.Join(d.dir(id), "diff"))
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go
new file mode 100644
index 000000000..221006b28
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go
@@ -0,0 +1,16 @@
+//go:build linux && (!cgo || exclude_disk_quota)
+
+package overlay
+
+import (
+ "path"
+
+ "github.com/containers/storage/pkg/directory"
+)
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For Overlay, it attempts to check the XFS quota for size, and falls back to
+// finding the size of the "diff" directory.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+ return directory.Usage(path.Join(d.dir(id), "diff"))
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
index 1cdac7777..a6df21d0f 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
@@ -1,16 +1,23 @@
-// +build linux,!cgo
+//go:build linux && !cgo
package overlay
import (
- "path"
-
- "github.com/containers/storage/pkg/directory"
+ "fmt"
)
-// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
-// For Overlay, it attempts to check the XFS quota for size, and falls back to
-// finding the size of the "diff" directory.
-func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
- return directory.Usage(path.Join(d.dir(id), "diff"))
+func openComposefsMount(dataDir string) (int, error) {
+ return 0, fmt.Errorf("composefs not supported on this build")
+}
+
+func getComposeFsHelper() (string, error) {
+ return "", fmt.Errorf("composefs not supported on this build")
+}
+
+func mountComposefsBlob(dataDir, mountPoint string) error {
+ return fmt.Errorf("composefs not supported on this build")
+}
+
+func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
+ return fmt.Errorf("composefs not supported on this build")
}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
index 49af84a22..b35633143 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux
package overlay
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
index fc565ef0b..85045f66e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package overlay
@@ -47,13 +47,13 @@ func generateID(l int) string {
if retryOnError(err) && retries < maxretries {
count += n
retries++
- logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
+ logrus.Errorf("Generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err))
}
break
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
index 9fc57b36b..b5baa11f4 100644
--- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package overlayutils
@@ -6,7 +6,6 @@ import (
"fmt"
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -16,5 +15,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
- return errors.Wrap(graphdriver.ErrNotSupported, msg)
+ return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported)
}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index d805623b5..081fb2b10 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -1,357 +1,5 @@
-// +build linux,!exclude_disk_quota,cgo
-
-//
-// projectquota.go - implements XFS project quota controls
-// for setting quota limits on a newly created directory.
-// It currently supports the legacy XFS specific ioctls.
-//
-// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
-// for both xfs/ext4 for kernel version >= v4.5
-//
-
package quota
-/*
-#include <stdlib.h>
-#include <dirent.h>
-#include <linux/fs.h>
-#include <linux/quota.h>
-#include <linux/dqblk_xfs.h>
-
-#ifndef FS_XFLAG_PROJINHERIT
-struct fsxattr {
- __u32 fsx_xflags;
- __u32 fsx_extsize;
- __u32 fsx_nextents;
- __u32 fsx_projid;
- unsigned char fsx_pad[12];
-};
-#define FS_XFLAG_PROJINHERIT 0x00000200
-#endif
-#ifndef FS_IOC_FSGETXATTR
-#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
-#endif
-#ifndef FS_IOC_FSSETXATTR
-#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
-#endif
-
-#ifndef PRJQUOTA
-#define PRJQUOTA 2
-#endif
-#ifndef XFS_PROJ_QUOTA
-#define XFS_PROJ_QUOTA 2
-#endif
-#ifndef Q_XSETPQLIM
-#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
-#endif
-#ifndef Q_XGETPQUOTA
-#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
-#endif
-*/
-import "C"
-import (
- "fmt"
- "io/ioutil"
- "path"
- "path/filepath"
- "unsafe"
-
- "github.com/containers/storage/pkg/directory"
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
-)
-
-// Quota limit params - currently we only control blocks hard limit
-type Quota struct {
- Size uint64
-}
-
-// Control - Context to be used by storage driver (e.g. overlay)
-// who wants to apply project quotas to container dirs
-type Control struct {
- backingFsBlockDev string
- nextProjectID uint32
- quotas map[string]uint32
-}
-
-// NewControl - initialize project quota support.
-// Test to make sure that quota can be set on a test dir and find
-// the first project id to be used for the next container create.
-//
-// Returns nil (and error) if project quota is not supported.
-//
-// First get the project id of the home directory.
-// This test will fail if the backing fs is not xfs.
-//
-// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
-// echo 999:/var/lib/containers/storage/overlay >> /etc/projects
-// echo storage:999 >> /etc/projid
-// xfs_quota -x -c 'project -s storage' /
-//
-// In that case, the home directory project id will be used as a "start offset"
-// and all containers will be assigned larger project ids (e.g. >= 1000).
-// This is a way to prevent xfs_quota management from conflicting with containers/storage.
-//
-// Then try to create a test directory with the next project id and set a quota
-// on it. If that works, continue to scan existing containers to map allocated
-// project ids.
-//
-func NewControl(basePath string) (*Control, error) {
- //
- // Get project id of parent dir as minimal id to be used by driver
- //
- minProjectID, err := getProjectID(basePath)
- if err != nil {
- return nil, err
- }
- minProjectID++
-
- //
- // create backing filesystem device node
- //
- backingFsBlockDev, err := makeBackingFsDev(basePath)
- if err != nil {
- return nil, err
- }
-
- //
- // Test if filesystem supports project quotas by trying to set
- // a quota on the first available project id
- //
- quota := Quota{
- Size: 0,
- }
- if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
- return nil, err
- }
-
- q := Control{
- backingFsBlockDev: backingFsBlockDev,
- nextProjectID: minProjectID + 1,
- quotas: make(map[string]uint32),
- }
-
- //
- // get first project id to be used for next container
- //
- err = q.findNextProjectID(basePath)
- if err != nil {
- return nil, err
- }
-
- logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
- return &q, nil
-}
-
-// SetQuota - assign a unique project id to directory and set the quota limits
-// for that project id
-func (q *Control) SetQuota(targetPath string, quota Quota) error {
-
- projectID, ok := q.quotas[targetPath]
- if !ok {
- projectID = q.nextProjectID
-
- //
- // assign project id to new container directory
- //
- err := setProjectID(targetPath, projectID)
- if err != nil {
- return err
- }
-
- q.quotas[targetPath] = projectID
- q.nextProjectID++
- }
-
- //
- // set the quota limit for the container's project id
- //
- logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
- return setProjectQuota(q.backingFsBlockDev, projectID, quota)
-}
-
-// setProjectQuota - set the quota for project id on xfs block device
-func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
- var d C.fs_disk_quota_t
- d.d_version = C.FS_DQUOT_VERSION
- d.d_id = C.__u32(projectID)
- d.d_flags = C.XFS_PROJ_QUOTA
-
- d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
- d.d_blk_hardlimit = C.__u64(quota.Size / 512)
- d.d_blk_softlimit = d.d_blk_hardlimit
-
- var cs = C.CString(backingFsBlockDev)
- defer C.free(unsafe.Pointer(cs))
-
- _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
- uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
- uintptr(unsafe.Pointer(&d)), 0, 0)
- if errno != 0 {
- return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
- projectID, backingFsBlockDev, errno.Error())
- }
-
- return nil
-}
-
-// GetQuota - get the quota limits of a directory that was configured with SetQuota
-func (q *Control) GetQuota(targetPath string, quota *Quota) error {
- d, err := q.fsDiskQuotaFromPath(targetPath)
- if err != nil {
- return err
- }
- quota.Size = uint64(d.d_blk_hardlimit) * 512
- return nil
-}
-
-// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
-func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
- d, err := q.fsDiskQuotaFromPath(targetPath)
- if err != nil {
- return err
- }
- usage.Size = int64(d.d_bcount) * 512
- usage.InodeCount = int64(d.d_icount)
-
- return nil
-}
-
-func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
- var d C.fs_disk_quota_t
-
- projectID, ok := q.quotas[targetPath]
- if !ok {
- return d, fmt.Errorf("quota not found for path : %s", targetPath)
- }
-
- //
- // get the quota limit for the container's project id
- //
- var cs = C.CString(q.backingFsBlockDev)
- defer C.free(unsafe.Pointer(cs))
-
- _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
- uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
- uintptr(unsafe.Pointer(&d)), 0, 0)
- if errno != 0 {
- return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
- projectID, q.backingFsBlockDev, errno.Error())
- }
-
- return d, nil
-}
-
-// getProjectID - get the project id of path on xfs
-func getProjectID(targetPath string) (uint32, error) {
- dir, err := openDir(targetPath)
- if err != nil {
- return 0, err
- }
- defer closeDir(dir)
-
- var fsx C.struct_fsxattr
- _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
- uintptr(unsafe.Pointer(&fsx)))
- if errno != 0 {
- return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
- }
-
- return uint32(fsx.fsx_projid), nil
-}
-
-// setProjectID - set the project id of path on xfs
-func setProjectID(targetPath string, projectID uint32) error {
- dir, err := openDir(targetPath)
- if err != nil {
- return err
- }
- defer closeDir(dir)
-
- var fsx C.struct_fsxattr
- _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
- uintptr(unsafe.Pointer(&fsx)))
- if errno != 0 {
- return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
- }
- fsx.fsx_projid = C.__u32(projectID)
- fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
- _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
- uintptr(unsafe.Pointer(&fsx)))
- if errno != 0 {
- return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
- }
-
- return nil
-}
-
-// findNextProjectID - find the next project id to be used for containers
-// by scanning driver home directory to find used project ids
-func (q *Control) findNextProjectID(home string) error {
- files, err := ioutil.ReadDir(home)
- if err != nil {
- return fmt.Errorf("read directory failed : %s", home)
- }
- for _, file := range files {
- if !file.IsDir() {
- continue
- }
- path := filepath.Join(home, file.Name())
- projid, err := getProjectID(path)
- if err != nil {
- return err
- }
- if projid > 0 {
- q.quotas[path] = projid
- }
- if q.nextProjectID <= projid {
- q.nextProjectID = projid + 1
- }
- }
-
- return nil
-}
-
-func free(p *C.char) {
- C.free(unsafe.Pointer(p))
-}
-
-func openDir(path string) (*C.DIR, error) {
- Cpath := C.CString(path)
- defer free(Cpath)
-
- dir := C.opendir(Cpath)
- if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
- }
- return dir, nil
-}
-
-func closeDir(dir *C.DIR) {
- if dir != nil {
- C.closedir(dir)
- }
-}
-
-func getDirFd(dir *C.DIR) uintptr {
- return uintptr(C.dirfd(dir))
-}
-
-// Get the backing block device of the driver home directory
-// and create a block device node under the home directory
-// to be used by quotactl commands
-func makeBackingFsDev(home string) (string, error) {
- var stat unix.Stat_t
- if err := unix.Stat(home, &stat); err != nil {
- return "", err
- }
-
- backingFsBlockDev := path.Join(home, "backingFsBlockDev")
- // Re-create just in case someone copied the home directory over to a new device
- unix.Unlink(backingFsBlockDev)
- if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
- return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
- }
-
- return backingFsBlockDev, nil
-}
+// BackingFsBlockDeviceLink is the name of a file that we place in
+// the home directory of a driver that uses this package.
+const BackingFsBlockDeviceLink = "backingFsBlockDev"
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
new file mode 100644
index 000000000..59ba5b0b2
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
@@ -0,0 +1,485 @@
+//go:build linux && !exclude_disk_quota && cgo
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+ __u32 fsx_xflags;
+ __u32 fsx_extsize;
+ __u32 fsx_nextents;
+ __u32 fsx_projid;
+ unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/directory"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const projectIDsAllocatedPerQuotaHome = 10000
+
+// Quota limit params - currently we only control blocks hard limit and inodes
+type Quota struct {
+ Size uint64
+ Inodes uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+ backingFsBlockDev string
+ nextProjectID uint32
+ quotas *sync.Map
+ basePath string
+}
+
+// Attempt to generate a unique projectid. Multiple directories
+// per file system can have quota and they need a group of unique
+// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000)
+// unique projectids, based on the inode of the basepath.
+func generateUniqueProjectID(path string) (uint32, error) {
+ fileinfo, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ stat, ok := fileinfo.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
+ }
+ projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
+ return uint32(projectID), nil
+}
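
The arithmetic above bands project IDs by the inode of the quota home, so two homes on the same filesystem get bands 10000 apart and each has roughly `projectIDsAllocatedPerQuotaHome` sequential IDs before colliding with another band. A worked example of the same expression (`per` mirrors `projectIDsAllocatedPerQuotaHome`; inode values are made up):

```go
package main

import (
	"fmt"
	"math"
)

const per = 10000 // mirrors projectIDsAllocatedPerQuotaHome

// idForInode reproduces the banding expression from generateUniqueProjectID.
func idForInode(ino uint64) uint32 {
	return uint32(per + (ino*per)%(math.MaxUint32-per))
}

func main() {
	fmt.Println(idForInode(131)) // 1320000
	fmt.Println(idForInode(132)) // 1330000: the next inode lands one band (10000*... ) away
}
```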
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and error) if project quota is not supported.
+//
+// First get the project id of the basePath directory.
+// This test will fail if the backing fs is not xfs.
+//
+// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
+// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
+// echo storage:100000 >> /etc/projid
+// echo volumes:200000 >> /etc/projid
+// xfs_quota -x -c 'project -s storage volumes' /
+//
+// In the example above, the storage directory project id will be used as a
+// "start offset" and all containers will be assigned larger project ids
+// (e.g. >= 100000). Then the volumes directory project id will be used as a
+// "start offset" and all volumes will be assigned larger project ids
+// (e.g. >= 200000).
+// This is a way to prevent xfs_quota management from conflicting with
+// containers/storage.
+
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
+func NewControl(basePath string) (*Control, error) {
+ //
+ // Get project id of parent dir as minimal id to be used by driver
+ //
+ minProjectID, err := getProjectID(basePath)
+ if err != nil {
+ return nil, err
+ }
+ if minProjectID == 0 {
+ // Indicates the storage was never initialized
+ // Generate a unique range of Projectids for this basepath
+ minProjectID, err = generateUniqueProjectID(basePath)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ //
+ // create backing filesystem device node
+ //
+ backingFsBlockDev, err := makeBackingFsDev(basePath)
+ if err != nil {
+ return nil, err
+ }
+
+ //
+ // Test if filesystem supports project quotas by trying to set
+ // a quota on the first available project id
+ //
+ quota := Quota{
+ Size: 0,
+ Inodes: 0,
+ }
+
+ q := Control{
+ backingFsBlockDev: backingFsBlockDev,
+ nextProjectID: minProjectID + 1,
+ quotas: &sync.Map{},
+ basePath: basePath,
+ }
+
+ if err := q.setProjectQuota(minProjectID, quota); err != nil {
+ return nil, err
+ }
+
+ // Clear inherit flag from top-level directory if necessary.
+ if err := stripProjectInherit(basePath); err != nil {
+ return nil, err
+ }
+
+ //
+ // get first project id to be used for next container
+ //
+ err = q.findNextProjectID()
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
+ return &q, nil
+}
+
+// SetQuota - assign a unique project id to directory and set the quota limits
+// for that project id
+func (q *Control) SetQuota(targetPath string, quota Quota) error {
+ var projectID uint32
+ value, ok := q.quotas.Load(targetPath)
+ if ok {
+ projectID, ok = value.(uint32)
+ }
+ if !ok {
+ projectID = q.nextProjectID
+
+ //
+ // assign project id to new container directory
+ //
+ err := setProjectID(targetPath, projectID)
+ if err != nil {
+ return err
+ }
+
+ q.quotas.Store(targetPath, projectID)
+ q.nextProjectID++
+ }
+
+ //
+ // set the quota limit for the container's project id
+ //
+ logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
+ return q.setProjectQuota(projectID, quota)
+}
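
`SetQuota` and `fsDiskQuotaFromPath` share the same two-step access pattern for the `*sync.Map`: `Load` returns `(any, bool)`, so the value must also be type-asserted, and the lookup and the assertion reuse one `ok` flag. A minimal sketch of that pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

// lookupID fails if either the key is absent or the stored value is not a uint32.
func lookupID(m *sync.Map, key string) (uint32, bool) {
	value, ok := m.Load(key)
	if !ok {
		return 0, false
	}
	id, ok := value.(uint32)
	return id, ok
}

func main() {
	var quotas sync.Map
	quotas.Store("/var/lib/containers/storage/overlay/abc", uint32(10001))
	fmt.Println(lookupID(&quotas, "/var/lib/containers/storage/overlay/abc"))
	fmt.Println(lookupID(&quotas, "/missing"))
}
```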
+
+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {
+ q.quotas.Delete(targetPath)
+}
+
+// setProjectQuota - set the quota for project id on xfs block device
+func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
+ var d C.fs_disk_quota_t
+ d.d_version = C.FS_DQUOT_VERSION
+ d.d_id = C.__u32(projectID)
+ d.d_flags = C.FS_PROJ_QUOTA
+
+ if quota.Size > 0 {
+ d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+ d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+ d.d_blk_softlimit = d.d_blk_hardlimit
+ }
+ if quota.Inodes > 0 {
+ d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+ d.d_ino_hardlimit = C.__u64(quota.Inodes)
+ d.d_ino_softlimit = d.d_ino_hardlimit
+ }
+
+ cs := C.CString(q.backingFsBlockDev)
+ defer C.free(unsafe.Pointer(cs))
+
+ runQuotactl := func() syscall.Errno {
+ _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
+ uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
+ uintptr(unsafe.Pointer(&d)), 0, 0)
+ return errno
+ }
+
+ errno := runQuotactl()
+
+ // If the backingFsBlockDev does not exist any more then try to recreate it.
+ if errors.Is(errno, unix.ENOENT) {
+ if _, err := makeBackingFsDev(q.basePath); err != nil {
+ return fmt.Errorf(
+ "failed to recreate missing backingFsBlockDev %s for projid %d: %w",
+ q.backingFsBlockDev, projectID, err,
+ )
+ }
+
+ if errno := runQuotactl(); errno != 0 {
+ return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
+ projectID, q.backingFsBlockDev, errno)
+ }
+
+ } else if errno != 0 {
+ return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
+ projectID, q.backingFsBlockDev, errno)
+ }
+
+ return nil
+}
+
+// GetQuota - get the quota limits of a directory that was configured with SetQuota
+func (q *Control) GetQuota(targetPath string, quota *Quota) error {
+ d, err := q.fsDiskQuotaFromPath(targetPath)
+ if err != nil {
+ return err
+ }
+ quota.Size = uint64(d.d_blk_hardlimit) * 512
+ quota.Inodes = uint64(d.d_ino_hardlimit)
+ return nil
+}
+
+// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
+func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
+ d, err := q.fsDiskQuotaFromPath(targetPath)
+ if err != nil {
+ return err
+ }
+ usage.Size = int64(d.d_bcount) * 512
+ usage.InodeCount = int64(d.d_icount)
+
+ return nil
+}
+
+func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
+ var d C.fs_disk_quota_t
+ var projectID uint32
+ value, ok := q.quotas.Load(targetPath)
+ if ok {
+ projectID, ok = value.(uint32)
+ }
+ if !ok {
+ return d, fmt.Errorf("quota not found for path : %s", targetPath)
+ }
+
+ //
+ // get the quota limit for the container's project id
+ //
+ cs := C.CString(q.backingFsBlockDev)
+ defer C.free(unsafe.Pointer(cs))
+
+ _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
+ uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
+ uintptr(unsafe.Pointer(&d)), 0, 0)
+ if errno != 0 {
+ return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
+ projectID, q.backingFsBlockDev, errno)
+ }
+
+ return d, nil
+}
+
+// getProjectID - get the project id of path on xfs
+func getProjectID(targetPath string) (uint32, error) {
+ dir, err := openDir(targetPath)
+ if err != nil {
+ return 0, err
+ }
+ defer closeDir(dir)
+
+ var fsx C.struct_fsxattr
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
+ }
+
+ return uint32(fsx.fsx_projid), nil
+}
+
+// setProjectID - set the project id of path on xfs
+func setProjectID(targetPath string, projectID uint32) error {
+ dir, err := openDir(targetPath)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath)
+
+ var fsx C.struct_fsxattr
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
+ }
+ fsx.fsx_projid = C.__u32(projectID)
+ fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
+ _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
+ }
+
+ return nil
+}
+
+// stripProjectInherit strips the project inherit flag from a directory.
+// Used on the top-level directory to ensure project IDs are only inherited for
+// files in directories we set quotas on - not the directories we want to set
+// the quotas on, as that would make everything use the same project ID.
+func stripProjectInherit(targetPath string) error {
+ dir, err := openDir(targetPath)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var fsx C.struct_fsxattr
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno)
+ }
+ if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 {
+ // Flag is set, need to clear it.
+ logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath)
+ fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT
+ _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno)
+ }
+ }
+ return nil
+}
+
+// findNextProjectID - find the next project id to be used for containers
+// by scanning driver home directory to find used project ids
+func (q *Control) findNextProjectID() error {
+ files, err := os.ReadDir(q.basePath)
+ if err != nil {
+ return fmt.Errorf("read directory failed : %s", q.basePath)
+ }
+ for _, file := range files {
+ if !file.IsDir() {
+ continue
+ }
+ path := filepath.Join(q.basePath, file.Name())
+ projid, err := getProjectID(path)
+ if err != nil {
+ return err
+ }
+ if projid > 0 {
+ q.quotas.Store(path, projid)
+ }
+ if q.nextProjectID <= projid {
+ q.nextProjectID = projid + 1
+ }
+ }
+
+ return nil
+}
+
+func free(p *C.char) {
+ C.free(unsafe.Pointer(p))
+}
+
+func openDir(path string) (*C.DIR, error) {
+ Cpath := C.CString(path)
+ defer free(Cpath)
+
+ dir, errno := C.opendir(Cpath)
+ if dir == nil {
+ return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
+ }
+ return dir, nil
+}
+
+func closeDir(dir *C.DIR) {
+ if dir != nil {
+ C.closedir(dir)
+ }
+}
+
+func getDirFd(dir *C.DIR) uintptr {
+ return uintptr(C.dirfd(dir))
+}
+
+// Get the backing block device of the driver home directory
+// and create a block device node under the home directory
+// to be used by quotactl commands
+func makeBackingFsDev(home string) (string, error) {
+ var stat unix.Stat_t
+ if err := unix.Stat(home, &stat); err != nil {
+ return "", err
+ }
+
+ backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
+ backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
+ // Re-create just in case someone copied the home directory over to a new device
+ if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
+ if !errors.Is(err, unix.EEXIST) {
+ return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
+ }
+ // On EEXIST, try again after unlinking any potential leftover.
+ _ = unix.Unlink(backingFsBlockDevTmp)
+ if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
+ return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
+ }
+ }
+ if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
+ return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
+ }
+
+ return backingFsBlockDev, nil
+}
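
`makeBackingFsDev` now builds the device node under a temporary name and renames it into place, so concurrent quotactl callers never observe the path missing (rename within one directory is atomic on POSIX). The general create-then-rename idiom, sketched with a regular file; `atomicReplace` is illustrative, not the vendored helper:

```go
package main

import (
	"fmt"
	"os"
)

// atomicReplace writes contents to path without a window where path is absent.
func atomicReplace(path string, contents []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, contents, 0o600); err != nil {
		return fmt.Errorf("failed to write %s: %w", tmp, err)
	}
	// Readers see either the old file or the new one, never nothing.
	if err := os.Rename(tmp, path); err != nil {
		return fmt.Errorf("failed to rename %s to %s: %w", tmp, path, err)
	}
	return nil
}

func main() {
	if err := atomicReplace("/tmp/backing-dev-demo", []byte("v2\n")); err != nil {
		fmt.Println(err)
	}
}
```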
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index be6c75a58..fdc2ad161 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -1,20 +1,20 @@
-// +build !linux exclude_disk_quota !cgo
+//go:build !linux || exclude_disk_quota || !cgo
package quota
import (
- "github.com/pkg/errors"
+ "errors"
)
// Quota limit params - currently we only control blocks hard limit
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
-type Control struct {
-}
+type Control struct{}
func NewControl(basePath string) (*Control, error) {
return nil, errors.New("filesystem does not support, or has not enabled quotas")
@@ -30,3 +30,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return errors.New("filesystem does not support, or has not enabled quotas")
}
+
+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {}
diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
index 7743dcedb..d95bd398b 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_aufs,linux
+//go:build !exclude_graphdriver_aufs && linux
package register
diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
index 40ff1cdd0..01a322374 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_btrfs,linux
+//go:build !exclude_graphdriver_btrfs && linux
package register
diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
deleted file mode 100644
index cefe2e8c7..000000000
--- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !exclude_graphdriver_devicemapper,linux,cgo
-
-package register
-
-import (
- // register the devmapper graphdriver
- _ "github.com/containers/storage/drivers/devmapper"
-)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
index 30e3b4d74..6e82a1ea3 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_overlay,linux,cgo
+//go:build !exclude_graphdriver_overlay && linux && cgo
package register
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
index c748468e5..136848f4a 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
package register
diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go
index 5d80b8865..66ab89f7f 100644
--- a/vendor/github.com/containers/storage/drivers/template.go
+++ b/vendor/github.com/containers/storage/drivers/template.go
@@ -1,9 +1,8 @@
package graphdriver
import (
- "github.com/sirupsen/logrus"
-
"github.com/containers/storage/pkg/idtools"
+ "github.com/sirupsen/logrus"
)
// TemplateDriver is just barely enough of a driver that we can implement a
@@ -31,10 +30,11 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
+ defer diff.Close()
applyOptions := ApplyDiffOpts{
Diff: diff,
@@ -44,7 +44,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
}
if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
index 8ac80ee1d..17e9d5870 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux
package vfs // import "github.com/containers/storage/drivers/vfs"
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index 1b58e2f63..f60ec17b3 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -5,12 +5,14 @@ import (
"io"
"os"
"path/filepath"
+ "runtime"
"strconv"
"strings"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@@ -19,15 +21,10 @@ import (
"github.com/vbatts/tar-split/tar/storage"
)
-var (
- // CopyDir defines the copy method to use.
- CopyDir = dirCopy
-)
-
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
- graphdriver.Register("vfs", Init)
+ graphdriver.MustRegister("vfs", Init)
}
// Init returns a new VFS driver.
@@ -35,16 +32,14 @@ func init() {
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
d := &Driver{
name: "vfs",
- homes: []string{home},
- idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ home: home,
+ imageStore: options.ImageStore,
}
- rootIDs := d.idMappings.RootPair()
- if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
+ if err := os.MkdirAll(filepath.Join(home, "dir"), 0o700); err != nil {
return nil, err
}
for _, option := range options.DriverOptions {
-
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
@@ -52,7 +47,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
key = strings.ToLower(key)
switch key {
case "vfs.imagestore", ".imagestore":
- d.homes = append(d.homes, strings.Split(val, ",")...)
+ d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
continue
case "vfs.mountopt":
return nil, fmt.Errorf("vfs driver does not support mount options")
@@ -67,6 +62,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, fmt.Errorf("vfs driver does not support %s options", key)
}
}
+
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
@@ -79,11 +75,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
name string
- homes []string
- idMappings *idtools.IDMappings
+ home string
+ additionalHomes []string
ignoreChownErrors bool
naiveDiff graphdriver.DiffDriver
updater graphdriver.LayerIDMapUpdater
+ imageStore string
}
func (d *Driver) String() string {
@@ -97,7 +94,7 @@ func (d *Driver) Status() [][2]string {
// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
@@ -152,14 +149,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
return fmt.Errorf("--storage-opt is not supported for vfs")
}
- idMappings := d.idMappings
+ var uidMaps []idtools.IDMap
+ var gidMaps []idtools.IDMap
+
if opts != nil && opts.IDMappings != nil {
- idMappings = opts.IDMappings
+ uidMaps = opts.IDMappings.UIDs()
+ gidMaps = opts.IDMappings.GIDs()
}
- dir := d.dir(id)
- rootIDs := idMappings.RootPair()
- if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return err
+ }
+
+ dir := d.dir2(id, ro)
+ if err := os.MkdirAll(filepath.Dir(dir), 0o700); err != nil {
return err
}
@@ -170,26 +174,33 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
}()
rootPerms := defaultPerms
+ if runtime.GOOS == "darwin" {
+ rootPerms = os.FileMode(0o700)
+ }
+
+ idPair := idtools.IDPair{UID: rootUID, GID: rootGID}
if parent != "" {
st, err := system.Stat(d.dir(parent))
if err != nil {
return err
}
rootPerms = os.FileMode(st.Mode())
- rootIDs.UID = int(st.UID())
- rootIDs.GID = int(st.GID())
+ idPair.UID = int(st.UID())
+ idPair.GID = int(st.GID())
}
- if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChownNew(dir, rootPerms, idPair); err != nil {
return err
}
labelOpts := []string{"level:s0"}
if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
- label.SetFileLabel(dir, mountLabel)
+ if err := label.SetFileLabel(dir, mountLabel); err != nil {
+ logrus.Debugf("Setting label %s on file %q failed: %v", mountLabel, dir, err)
+ }
}
if parent != "" {
parentDir, err := d.Get(parent, graphdriver.MountOpts{})
if err != nil {
- return fmt.Errorf("%s: %s", parent, err)
+ return fmt.Errorf("%s: %w", parent, err)
}
if err := dirCopy(parentDir, dir); err != nil {
return err
@@ -197,21 +208,34 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
}
return nil
-
}
-func (d *Driver) dir(id string) string {
- for i, home := range d.homes {
- if i > 0 {
- home = filepath.Join(home, d.String())
+func (d *Driver) dir2(id string, useImageStore bool) string {
+ var homedir string
+
+ if useImageStore && d.imageStore != "" {
+ homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
+ } else {
+ homedir = filepath.Join(d.home, "dir", filepath.Base(id))
+ }
+ if err := fileutils.Exists(homedir); err != nil {
+ additionalHomes := append([]string{}, d.additionalHomes...) // copy, so appending below cannot write into the shared backing array
+ if d.imageStore != "" {
+ additionalHomes = append(additionalHomes, d.imageStore)
}
- candidate := filepath.Join(home, "dir", filepath.Base(id))
- fi, err := os.Stat(candidate)
- if err == nil && fi.IsDir() {
- return candidate
+ for _, home := range additionalHomes {
+ candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
+ fi, err := os.Stat(candidate)
+ if err == nil && fi.IsDir() {
+ return candidate
+ }
}
}
- return filepath.Join(d.homes[0], "dir", filepath.Base(id))
+ return homedir
+}
+
+func (d *Driver) dir(id string) string {
+ return d.dir2(id, false)
}
// Remove deletes the content from the directory for a given id.
@@ -222,15 +246,12 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
- switch len(options.Options) {
- case 0:
- case 1:
- if options.Options[0] == "ro" {
+
+ for _, opt := range options.Options {
+ if opt == "ro" {
// ignore "ro" option
- break
+ continue
}
- fallthrough
- default:
return "", fmt.Errorf("vfs driver does not support mount options")
}
if st, err := os.Stat(dir); err != nil {
@@ -256,14 +277,36 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
// Exists checks to see if the directory exists for the given id.
func (d *Driver) Exists(id string) bool {
- _, err := os.Stat(d.dir(id))
+ err := fileutils.Exists(d.dir(id))
return err == nil
}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+ entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
+ if err != nil {
+ return nil, err
+ }
+
+ layers := make([]string, 0)
+
+ for _, entry := range entries {
+ id := entry.Name()
+ // Does it look like a datadir directory?
+ if !entry.IsDir() {
+ continue
+ }
+
+ layers = append(layers, id)
+ }
+
+ return layers, nil
+}
+
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
- if len(d.homes) > 1 {
- return d.homes[1:]
+ if len(d.additionalHomes) > 0 {
+ return d.additionalHomes
}
return nil
}
@@ -276,7 +319,15 @@ func (d *Driver) SupportsShifting() bool {
// UpdateLayerIDMap updates ID mappings in a from matching the ones specified
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
- return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel)
+ if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil {
+ return err
+ }
+ dir := d.dir(id)
+ rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0})
+ if err != nil {
+ return err
+ }
+ return os.Chown(dir, rootIDs.UID, rootIDs.GID)
}
// Changes produces a list of changes between the specified layer
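
A note on the vfs changes above: dir2 builds its primary candidate under d.home (or under d.imageStore when the layer is created read-only and an image store is configured), and only falls back to scanning the additional image stores when that path does not already exist. Below is a minimal, self-contained sketch of that lookup order, not the driver itself: the "vfs" literal stands in for d.String(), os.Stat for the repo's fileutils.Exists helper, and the paths in main are hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func dir2(home, imageStore string, additionalHomes []string, id string, useImageStore bool) string {
	// Primary candidate: the image store for read-only layers (if configured),
	// otherwise the driver's own home.
	primary := filepath.Join(home, "dir", filepath.Base(id))
	if useImageStore && imageStore != "" {
		primary = filepath.Join(imageStore, "vfs", "dir", filepath.Base(id))
	}
	if _, err := os.Stat(primary); err == nil {
		return primary
	}
	// Fallback: scan the additional image stores (plus the image store itself),
	// which keep layers under "<store>/vfs/dir/<id>".
	stores := append([]string{}, additionalHomes...)
	if imageStore != "" {
		stores = append(stores, imageStore)
	}
	for _, store := range stores {
		candidate := filepath.Join(store, "vfs", "dir", filepath.Base(id))
		if fi, err := os.Stat(candidate); err == nil && fi.IsDir() {
			return candidate
		}
	}
	// Nothing found: return the primary path so callers can create it.
	return primary
}

func main() {
	fmt.Println(dir2("/var/lib/storage/vfs", "", nil, "abc123", false))
}
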
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 149151741..d38e74534 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows
package windows
@@ -9,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -23,9 +23,10 @@ import (
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
- "github.com/containers/storage/drivers"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/longpath"
@@ -53,7 +54,7 @@ var (
// init registers the windows graph drivers to the register.
func init() {
- graphdriver.Register("windowsfilter", InitFilter)
+ graphdriver.MustRegister("windowsfilter", InitFilter)
// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
// debugging issues in the re-exec codepath significantly easier.
if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
@@ -64,8 +65,7 @@ func init() {
}
}
-type checker struct {
-}
+type checker struct{}
func (c *checker) IsMounted(path string) bool {
return false
@@ -102,8 +102,8 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e
return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
}
- if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
- return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+ if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil {
+ return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err)
}
d := &Driver{
@@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
return result
}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+ return nil, graphdriver.ErrNotSupported
+}
+
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
@@ -227,7 +232,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
if err != nil {
return err
}
- if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil {
+ if err := fileutils.Exists(filepath.Join(parentPath, "Files")); err == nil {
// This is a legitimate parent layer (not the empty "-init" layer),
// so include it in the layer chain.
layerChain = []string{parentPath}
@@ -252,7 +257,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
storageOptions, err := parseStorageOpt(storageOpt)
if err != nil {
- return fmt.Errorf("Failed to parse storage options - %s", err)
+ return fmt.Errorf("failed to parse storage options - %s", err)
}
if storageOptions.size != 0 {
@@ -262,11 +267,11 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
}
}
- if _, err := os.Lstat(d.dir(parent)); err != nil {
+ if err := fileutils.Lexists(d.dir(parent)); err != nil {
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
- return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
+ return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err)
}
if err := d.setLayerChain(id, layerChain); err != nil {
@@ -474,7 +479,7 @@ func (d *Driver) Put(id string) error {
// We use this opportunity to cleanup any -removing folders which may be
// still left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
- items, err := ioutil.ReadDir(d.info.HomeDir)
+ items, err := os.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -759,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64,
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
- if strings.HasPrefix(base, archive.WhiteoutPrefix) {
- name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
+ if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
+ name := path.Join(path.Dir(hdr.Name), rm)
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err
@@ -810,7 +815,7 @@ func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []
}
if err = cmd.Wait(); err != nil {
- return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+ return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err)
}
return strconv.ParseInt(output.String(), 10, 64)
@@ -869,7 +874,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ..
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
- content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+ content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
@@ -880,23 +885,23 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error {
- return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+ return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600)
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
- content, err := ioutil.ReadFile(jPath)
+ content, err := os.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
- return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+ return nil, fmt.Errorf("unable to read layerchain file - %s", err)
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
- return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
+ return nil, fmt.Errorf("failed to unmarshall layerchain json - %s", err)
}
return layerChain, nil
@@ -906,13 +911,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) {
func (d *Driver) setLayerChain(id string, chain []string) error {
content, err := json.Marshal(&chain)
if err != nil {
- return fmt.Errorf("Failed to marshall layerchain json - %s", err)
+ return fmt.Errorf("failed to marshall layerchain json - %s", err)
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
- err = ioutil.WriteFile(jPath, content, 0600)
+ err = os.WriteFile(jPath, content, 0o600)
if err != nil {
- return fmt.Errorf("Unable to write layerchain file - %s", err)
+ return fmt.Errorf("unable to write layerchain file - %s", err)
}
return nil
@@ -999,7 +1004,7 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
}
options.size = uint64(size)
default:
- return nil, fmt.Errorf("Unknown storage option: %s", key)
+ return nil, fmt.Errorf("unknown storage option: %s", key)
}
}
return &options, nil
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index e034bf152..ef26c6c7f 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -1,4 +1,4 @@
-// +build linux freebsd
+//go:build linux || freebsd
package zfs
@@ -17,9 +17,8 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
- "github.com/mistifyio/go-zfs"
+ zfs "github.com/mistifyio/go-zfs/v3"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -30,10 +29,10 @@ type zfsOptions struct {
mountOptions string
}
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
- graphdriver.Register("zfs", Init)
+ graphdriver.MustRegister("zfs", Init)
}
// Logger returns a zfs logger implementation.
@@ -54,15 +53,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
if _, err := exec.LookPath("zfs"); err != nil {
logger.Debugf("zfs command is not available: %v", err)
- return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
+ return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}
- file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+ file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
+ return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
}
- defer file.Close()
+ defer unix.Close(file)
options, err := parseOptions(opt.DriverOptions)
if err != nil {
@@ -90,7 +89,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
filesystems, err := zfs.Filesystems(options.fsName)
if err != nil {
- return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err)
}
filesystemsCache := make(map[string]bool, len(filesystems))
@@ -103,23 +102,17 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
}
if rootDataset == nil {
- return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
- if err != nil {
- return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
- }
- if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
- return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+ if err := os.MkdirAll(base, 0o700); err != nil {
+ return nil, fmt.Errorf("failed to create '%s': %w", base, err)
}
d := &Driver{
dataset: rootDataset,
options: options,
filesystemsCache: filesystemsCache,
- uidMaps: opt.UIDMaps,
- gidMaps: opt.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
@@ -140,7 +133,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
case "zfs.mountopt":
options.mountOptions = val
default:
- return options, fmt.Errorf("Unknown option %s", key)
+ return options, fmt.Errorf("unknown option %s", key)
}
}
return options, nil
@@ -149,7 +142,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
func lookupZfsDataset(rootdir string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(rootdir, &stat); err != nil {
- return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return "", fmt.Errorf("failed to access '%s': %w", rootdir, err)
}
wantedDev := stat.Dev
@@ -168,7 +161,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
}
}
- return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+ return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
}
// Driver holds information about the driver, such as zfs dataset, options and cache.
@@ -177,8 +170,6 @@ type Driver struct {
options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool
- uidMaps []idtools.IDMap
- gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
@@ -248,7 +239,9 @@ func (d *Driver) cloneFilesystem(name, parentName string) error {
}
if err != nil {
- snapshot.Destroy(zfs.DestroyDeferDeletion)
+ if err1 := snapshot.Destroy(zfs.DestroyDeferDeletion); err1 != nil {
+ logrus.Warnf("Destroy zfs.DestroyDeferDeletion: %v", err1)
+ }
return err
}
return snapshot.Destroy(zfs.DestroyDeferDeletion)
@@ -315,7 +308,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
if opts != nil {
rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs())
if err != nil {
- return fmt.Errorf("Failed to get root uid/gid: %v", err)
+ return fmt.Errorf("failed to get root uid/gid: %w", err)
}
mountLabel = opts.MountLabel
}
@@ -341,22 +334,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel)
if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil {
- return errors.Wrap(err, "error creating zfs mount")
+ return fmt.Errorf("creating zfs mount: %w", err)
}
defer func() {
- if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
- logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
+ if err := detachUnmount(mountpoint); err != nil {
+ logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err)
}
}()
if err := os.Chmod(mountpoint, defaultPerms); err != nil {
- return errors.Wrap(err, "error setting permissions on zfs mount")
+ return fmt.Errorf("setting permissions on zfs mount: %w", err)
}
// this is our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
- return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint)
+ return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err)
}
}
@@ -377,7 +370,7 @@ func parseStorageOpt(storageOpt map[string]string) (string, error) {
case "size":
return v, nil
default:
- return "0", fmt.Errorf("Unknown option %s", key)
+ return "0", fmt.Errorf("unknown option %s", key)
}
}
return "0", nil
@@ -399,17 +392,22 @@ func (d *Driver) Remove(id string) error {
name := d.zfsPath(id)
dataset := zfs.Dataset{Name: name}
err := dataset.Destroy(zfs.DestroyRecursive)
- if err == nil {
- d.Lock()
- delete(d.filesystemsCache, name)
- d.Unlock()
+ if err != nil {
+ // We must be tolerant in case the image has already been removed,
+ // for example, accidentally by hand.
+ if _, err1 := zfs.GetDataset(name); err1 == nil {
+ return err
+ }
+ logrus.WithField("storage-driver", "zfs").Debugf("Layer %s has already been removed; ignore it and continue to delete the cache", id)
}
- return err
+ d.Lock()
+ delete(d.filesystemsCache, name)
+ d.Unlock()
+ return nil
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
-
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
@@ -449,23 +447,19 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return "", err
- }
// Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
+ if err := os.MkdirAll(mountpoint, 0o755); err != nil {
return "", err
}
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error creating zfs mount")
+ return "", fmt.Errorf("creating zfs mount: %w", err)
}
if remountReadOnly {
opts = label.FormatMountLabel("remount,ro", options.MountLabel)
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error remounting zfs mount read-only")
+ return "", fmt.Errorf("remounting zfs mount read-only: %w", err)
}
}
@@ -483,7 +477,7 @@ func (d *Driver) Put(id string) error {
logger.Debugf(`unmount("%s")`, mountpoint)
- if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+ if err := detachUnmount(mountpoint); err != nil {
logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
@@ -506,6 +500,13 @@ func (d *Driver) Exists(id string) bool {
return d.filesystemsCache[d.zfsPath(id)]
}
+// List layers (not including additional image stores). Our layers aren't all
+// dependent on a single well-known dataset, so we can't reliably tell which
+// datasets are ours and which ones just look like they could be ours.
+func (d *Driver) ListLayers() ([]string, error) {
+ return nil, graphdriver.ErrNotSupported
+}
+
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
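
A note on the error handling in this file: replacing github.com/pkg/errors with fmt.Errorf and the %w verb preserves sentinel matching, since %w-wrapped errors still satisfy errors.Is just as errors.Wrap did. A minimal check, with a local stand-in for graphdriver.ErrPrerequisites:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for graphdriver.ErrPrerequisites.
var errPrerequisites = errors.New("prerequisites for driver not satisfied")

func main() {
	wrapped := fmt.Errorf("the 'zfs' command is not available: %w", errPrerequisites)
	// Callers that previously matched errors.Wrap output still match:
	fmt.Println(errors.Is(wrapped, errPrerequisites)) // true
}
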
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index bf6905159..c3c73c61e 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -2,10 +2,8 @@ package zfs
import (
"fmt"
- "strings"
- "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -13,27 +11,23 @@ import (
func checkRootdirFs(rootdir string) error {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
- return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return fmt.Errorf("failed to access '%s': %s", rootdir, err)
}
// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites)
}
return nil
}
func getMountpoint(id string) string {
- maxlen := 12
-
- // we need to preserve filesystem suffix
- suffix := strings.SplitN(id, "-", 2)
-
- if len(suffix) > 1 {
- return id[:maxlen] + "-" + suffix[1]
- }
+ return id
+}
- return id[:maxlen]
+func detachUnmount(mountpoint string) error {
+ // FreeBSD's MNT_FORCE is roughly equivalent to Linux's MNT_DETACH
+ return unix.Unmount(mountpoint, unix.MNT_FORCE)
}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
index edcb1da36..d43ba5c2b 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -1,9 +1,11 @@
package zfs
import (
+ "fmt"
+
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
)
func checkRootdirFs(rootDir string) error {
@@ -18,7 +20,7 @@ func checkRootdirFs(rootDir string) error {
if fsMagic != graphdriver.FsMagicZfs {
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites)
}
return nil
@@ -27,3 +29,7 @@ func checkRootdirFs(rootDir string) error {
func getMountpoint(id string) string {
return id
}
+
+func detachUnmount(mountpoint string) error {
+ return unix.Unmount(mountpoint, unix.MNT_DETACH)
+}
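
A note on the platform files: unmounting now goes through a detachUnmount helper that each OS file supplies under its own build constraint, so the shared zfs.go code can call it unconditionally; Linux uses the lazy MNT_DETACH flag, while FreeBSD substitutes MNT_FORCE as its nearest equivalent (per the comment above). A self-contained, Linux-only sketch of the pattern; the /mnt/example path is hypothetical:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func detachUnmount(mountpoint string) error {
	// Lazy unmount: detach the mount point now, clean up once it is no longer busy.
	return unix.Unmount(mountpoint, unix.MNT_DETACH)
}

func main() {
	if err := detachUnmount("/mnt/example"); err != nil {
		fmt.Println("unmount failed (expected unless /mnt/example is a mount):", err)
	}
}
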
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
index 643b169bc..15a1447ac 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
@@ -1,11 +1,3 @@
-// +build !linux,!freebsd
+//go:build !linux && !freebsd
package zfs
-
-func checkRootdirFs(rootdir string) error {
- return nil
-}
-
-func getMountpoint(id string) string {
- return id
-}
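
A note on the build tags across this series: files are moving from the legacy // +build lines to the //go:build syntax introduced in Go 1.17. The Windows driver keeps both forms for older toolchains, while the zfs files drop the legacy line. Both notations express the same constraint, for example:

// Both forms below express the same constraint; in the legacy syntax a comma
// means AND and a space means OR, while //go:build spells out && and ||.

//go:build !linux && !freebsd
// +build !linux,!freebsd

package zfs
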
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index 5fc810b89..de6e37754 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -1,6 +1,8 @@
package storage
import (
+ "errors"
+
"github.com/containers/storage/types"
)
@@ -55,4 +57,9 @@ var (
ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = types.ErrNotSupported
+ // ErrInvalidMappings is returned when the specified mappings are invalid.
+ ErrInvalidMappings = types.ErrInvalidMappings
+ // errInvalidUpdateNameOperation is returned when updateName is called with an invalid operation.
+ // Internal error
+ errInvalidUpdateNameOperation = errors.New("invalid update name operation")
)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index e7ca56e64..e288f2bae 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -1,32 +1,59 @@
-go 1.14
+go 1.22.0
+
+// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates.
+// That generally means there should be no toolchain directive present.
module github.com/containers/storage
require (
- github.com/BurntSushi/toml v0.3.1
- github.com/Microsoft/go-winio v0.5.0
- github.com/Microsoft/hcsshim v0.8.17
- github.com/docker/go-units v0.4.0
+ github.com/BurntSushi/toml v1.4.0
+ github.com/Microsoft/go-winio v0.6.2
+ github.com/Microsoft/hcsshim v0.12.9
+ github.com/containerd/stargz-snapshotter/estargz v0.15.1
+ github.com/cyphar/filepath-securejoin v0.3.4
+ github.com/docker/go-units v0.5.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
- github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.12.3
- github.com/klauspost/pgzip v1.2.5
- github.com/mattn/go-shellwords v1.0.11
- github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
- github.com/moby/sys/mountinfo v0.4.1
+ github.com/json-iterator/go v1.1.12
+ github.com/klauspost/compress v1.17.11
+ github.com/klauspost/pgzip v1.2.6
+ github.com/mattn/go-shellwords v1.0.12
+ github.com/mistifyio/go-zfs/v3 v3.0.1
+ github.com/moby/sys/capability v0.3.0
+ github.com/moby/sys/mountinfo v0.7.2
+ github.com/moby/sys/user v0.3.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.0-rc95
- github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
- github.com/opencontainers/selinux v1.8.1
- github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
- github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
- github.com/tchap/go-patricia v2.3.0+incompatible
- github.com/ulikunitz/xz v0.5.10
- github.com/vbatts/tar-split v0.11.1
- golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
- golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
+ github.com/opencontainers/runtime-spec v1.2.0
+ github.com/opencontainers/selinux v1.11.1
+ github.com/sirupsen/logrus v1.9.3
+ github.com/stretchr/testify v1.9.0
+ github.com/tchap/go-patricia/v2 v2.3.1
+ github.com/ulikunitz/xz v0.5.12
+ github.com/vbatts/tar-split v0.11.6
+ golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+ golang.org/x/sys v0.26.0
gotest.tools v2.2.0+incompatible
)
+
+require (
+ github.com/containerd/cgroups/v3 v3.0.3 // indirect
+ github.com/containerd/errdefs v0.3.0 // indirect
+ github.com/containerd/errdefs/pkg v0.3.0 // indirect
+ github.com/containerd/typeurl/v2 v2.2.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/grpc v1.67.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 5373d0597..118011246 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -1,879 +1,194 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.17 h1:yFHH5bghP9ij5Y34PPaMOE8g//oXZ0uJQeMENVo2zcI=
-github.com/Microsoft/hcsshim v0.8.17/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
+github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
+github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
+github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
-github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
-github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
+github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
+github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
+github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0=
-github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA=
-github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
+github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
-github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
-github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -882,77 +197,15 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
index f870b9cee..43cf1fb5f 100644
--- a/vendor/github.com/containers/storage/idset.go
+++ b/vendor/github.com/containers/storage/idset.go
@@ -1,9 +1,12 @@
package storage
import (
+ "fmt"
+ "strings"
+
"github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/types"
"github.com/google/go-intervals/intervalset"
- "github.com/pkg/errors"
)
// idSet represents a set of integer IDs. It is stored as an ordered set of intervals.
@@ -108,12 +111,12 @@ func (s *idSet) findAvailable(n int) (*idSet, error) {
iterator, cancel := s.iterator()
defer cancel()
for i := iterator(); n > 0 && i != nil; i = iterator() {
- i.end = minInt(i.end, i.start+n)
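+ // min here is the Go 1.21 built-in; the package-local minInt/maxInt helpers are removed further down.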
+ i.end = min(i.end, i.start+n)
intervals = append(intervals, *i)
n -= i.length()
}
if n > 0 {
- return nil, errors.New("could not find enough available IDs")
+ return nil, types.ErrNoAvailableIDs
}
return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil
}
@@ -126,7 +129,7 @@ func (s *idSet) zip(container *idSet) []idtools.IDMap {
defer containerCancel()
var out []idtools.IDMap
for h, c := hostIterator(), containerIterator(); h != nil && c != nil; {
- if n := minInt(h.length(), c.length()); n > 0 {
+ if n := min(h.length(), c.length()); n > 0 {
out = append(out, idtools.IDMap{
ContainerID: c.start,
HostID: h.start,
@@ -156,12 +159,12 @@ type interval struct {
}
func (i interval) length() int {
- return maxInt(0, i.end-i.start)
+ return max(0, i.end-i.start)
}
func (i interval) Intersect(other intervalset.Interval) intervalset.Interval {
j := other.(interval)
- return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)}
+ return interval{start: max(i.start, j.start), end: min(i.end, j.end)}
}
func (i interval) Before(other intervalset.Interval) bool {
@@ -180,15 +183,15 @@ func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, inte
}
// Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and
// [j.end, +inf).
- left := interval{start: i.start, end: minInt(i.end, j.start)}
- right := interval{start: maxInt(i.start, j.end), end: i.end}
+ left := interval{start: i.start, end: min(i.end, j.start)}
+ right := interval{start: max(i.start, j.end), end: i.end}
return left, right
}
func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval {
j := other.(interval)
if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) {
- return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)}
+ return interval{start: min(i.start, j.start), end: max(i.end, j.end)}
}
return interval{}
}
@@ -201,20 +204,48 @@ func (i interval) Encompass(other intervalset.Interval) intervalset.Interval {
case j.IsZero():
return i
default:
- return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)}
+ return interval{start: min(i.start, j.start), end: max(i.end, j.end)}
}
}
-func minInt(a, b int) int {
- if a < b {
- return a
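+// hasOverlappingRanges reports, wrapped in ErrInvalidMappings, every mapping
+// triple (containerID:hostID:size) whose container or host ID range overlaps a
+// range seen earlier in the slice; it returns nil when all mappings are disjoint.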
+func hasOverlappingRanges(mappings []idtools.IDMap) error {
+ hostIntervals := intervalset.Empty()
+ containerIntervals := intervalset.Empty()
+
+ var conflicts []string
+
+ for _, m := range mappings {
+ c := interval{start: m.ContainerID, end: m.ContainerID + m.Size}
+ h := interval{start: m.HostID, end: m.HostID + m.Size}
+
+ added := false
+ overlaps := false
+
+ containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool {
+ overlaps = true
+ return false
+ })
+ if overlaps {
+ conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+ added = true
+ }
+ containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c}))
+
+ hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool {
+ overlaps = true
+ return false
+ })
+ if overlaps && !added {
+ conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+ }
+ hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h}))
}
- return b
-}
-func maxInt(a, b int) int {
- if a < b {
- return b
+ if conflicts != nil {
+ if len(conflicts) == 1 {
+ return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings)
+ }
+ return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings)
}
- return a
+ return nil
}
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index bca25a65b..5c9127ede 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -1,19 +1,21 @@
package storage
import (
- "io/ioutil"
+ "fmt"
"os"
"path/filepath"
+ "slices"
"strings"
"sync"
"time"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
const (
@@ -94,11 +96,17 @@ type Image struct {
Flags map[string]interface{} `json:"flags,omitempty"`
}
-// ROImageStore provides bookkeeping for information about Images.
-type ROImageStore interface {
- ROFileBasedStore
- ROMetadataStore
- ROBigDataStore
+// roImageStore provides bookkeeping for information about Images.
+type roImageStore interface {
+ roMetadataStore
+ roBigDataStore
+
+ // startReading makes sure the store is fresh, and locks it for reading.
+ // If this succeeds, the caller MUST call stopReading().
+ startReading() error
+
+ // stopReading releases locks obtained by startReading.
+ stopReading()
// Exists checks if there is an image with the given ID or name.
Exists(id string) bool
@@ -106,10 +114,6 @@ type ROImageStore interface {
// Get retrieves information about an image given an ID or name.
Get(id string) (*Image, error)
- // Lookup attempts to translate a name to an ID. Most methods do this
- // implicitly.
- Lookup(name string) (string, error)
-
// Images returns a slice enumerating the known images.
Images() ([]Image, error)
@@ -120,58 +124,76 @@ type ROImageStore interface {
ByDigest(d digest.Digest) ([]*Image, error)
}
-// ImageStore provides bookkeeping for information about Images.
-type ImageStore interface {
- ROImageStore
- RWFileBasedStore
- RWMetadataStore
- RWImageBigDataStore
- FlaggableStore
+// rwImageStore provides bookkeeping for information about Images.
+type rwImageStore interface {
+ roImageStore
+ rwMetadataStore
+ rwImageBigDataStore
+ flaggableStore
- // Create creates an image that has a specified ID (or a random one) and
+ // startWriting makes sure the store is fresh, and locks it for writing.
+ // If this succeeds, the caller MUST call stopWriting().
+ startWriting() error
+
+ // stopWriting releases locks obtained by startWriting.
+ stopWriting()
+
+ // create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
- Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
+ create(id string, names []string, layer string, options ImageOptions) (*Image, error)
- // SetNames replaces the list of names associated with an image with the
- // supplied values. The values are expected to be valid normalized
+ // updateNames modifies names associated with an image based on (op, names).
+ // The values are expected to be valid normalized
// named image references.
- SetNames(id string, names []string) error
+ updateNames(id string, names []string, op updateNameOperation) error
// Delete removes the record of the image.
Delete(id string) error
+ addMappedTopLayer(id, layer string) error
+ removeMappedTopLayer(id, layer string) error
+
+ // Clean up unreferenced per-image data.
+ GarbageCollect() error
+
// Wipe removes records of all images.
Wipe() error
}
type imageStore struct {
- lockfile Locker
+ // The following fields are only set when constructing imageStore, and must never be modified afterwards.
+ // They are safe to access without any other locking.
+ lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
dir string
- images []*Image
- idindex *truncindex.TruncIndex
- byid map[string]*Image
- byname map[string]*Image
- bydigest map[digest.Digest][]*Image
- loadMut sync.Mutex
+
+ inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+ // The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+ // Almost all users should use startReading() or startWriting().
+ lastWrite lockfile.LastWrite
+ images []*Image
+ idindex *truncindex.TruncIndex
+ byid map[string]*Image
+ byname map[string]*Image
+ bydigest map[digest.Digest][]*Image
}
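+
+// The copy*PreferringNil helpers used below are package utilities which, as
+// their names suggest, return nil for empty inputs instead of allocating an
+// empty slice/map, keeping omitempty JSON fields absent across copies.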
func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
- Digests: copyDigestSlice(i.Digests),
- Names: copyStringSlice(i.Names),
- NamesHistory: copyStringSlice(i.NamesHistory),
+ Digests: copySlicePreferringNil(i.Digests),
+ Names: copySlicePreferringNil(i.Names),
+ NamesHistory: copySlicePreferringNil(i.NamesHistory),
TopLayer: i.TopLayer,
- MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+ MappedTopLayers: copySlicePreferringNil(i.MappedTopLayers),
Metadata: i.Metadata,
- BigDataNames: copyStringSlice(i.BigDataNames),
- BigDataSizes: copyStringInt64Map(i.BigDataSizes),
- BigDataDigests: copyStringDigestMap(i.BigDataDigests),
+ BigDataNames: copySlicePreferringNil(i.BigDataNames),
+ BigDataSizes: copyMapPreferringNil(i.BigDataSizes),
+ BigDataDigests: copyMapPreferringNil(i.BigDataDigests),
Created: i.Created,
ReadOnly: i.ReadOnly,
- Flags: copyStringInterfaceMap(i.Flags),
+ Flags: copyMapPreferringNil(i.Flags),
}
}
@@ -186,6 +208,191 @@ func copyImageSlice(slice []*Image) []*Image {
return nil
}
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of imageStore construction; every other caller
+// should use startWriting() instead.
+func (r *imageStore) startWritingWithReload(canReload bool) error {
+ r.lockfile.Lock()
+ r.inProcessLock.Lock()
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+ }
+ }()
+
+ if canReload {
+ if _, err := r.reloadIfChanged(true); err != nil {
+ return err
+ }
+ }
+
+ succeeded = true
+ return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *imageStore) startWriting() error {
+ return r.startWritingWithReload(true)
+}
+
+// stopWriting releases locks obtained by startWriting.
+func (r *imageStore) stopWriting() {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+}
+
+// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+//
+// This is an internal implementation detail of imageStore construction; every other caller
+// should use startReading() instead.
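+//
+// The slow path escalates in stages: it first retries the reload while still
+// holding the shared file lock, takes the exclusive lock only if load()
+// reports that it needs to rewrite the on-disk data, and finally drops back
+// to the shared lock and re-checks.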
+func (r *imageStore) startReadingWithReload(canReload bool) error {
+ // inProcessLocked calls the nested function with r.inProcessLock held for writing.
+ inProcessLocked := func(fn func() error) error {
+ r.inProcessLock.Lock()
+ defer r.inProcessLock.Unlock()
+ return fn()
+ }
+
+ r.lockfile.RLock()
+ unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
+ defer func() {
+ if unlockFn != nil {
+ unlockFn()
+ }
+ }()
+ r.inProcessLock.RLock()
+ unlockFn = r.stopReading
+
+ if canReload {
+ // If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
+ _, modified, err := r.modified()
+ if err != nil {
+ return err
+ }
+ if modified {
+ // We are unlucky, and need to reload.
+ // NOTE: Multiple goroutines can get to this place approximately simultaneously.
+ r.inProcessLock.RUnlock()
+ unlockFn = r.lockfile.Unlock
+
+ // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally
+ // trigger a load below; we (lock and) reloadIfChanged() again.
+
+ // First try reloading with r.lockfile held for reading.
+ // r.inProcessLock will serialize all goroutines that got here;
+ // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
+ var tryLockedForWriting bool
+ if err := inProcessLocked(func() error {
+ // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile,
+ // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock.
+ var err error
+ tryLockedForWriting, err = r.reloadIfChanged(false)
+ return err
+ }); err != nil {
+ if !tryLockedForWriting {
+ return err
+ }
+ // Not good enough, we need r.lockfile held for writing. So, let’s do that.
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.Lock()
+ unlockFn = r.lockfile.Unlock
+ if err := inProcessLocked(func() error {
+ _, err := r.reloadIfChanged(true)
+ return err
+ }); err != nil {
+ return err
+ }
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.RLock()
+ unlockFn = r.lockfile.Unlock
+ // We need to check for a reload once more because the on-disk state could have been modified
+ // after we released the lock.
+ // If that, _again_, finds inconsistent state, just give up.
+ // We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
+ // shouldn’t be saved (by correct implementations) in the first place.
+ if err := inProcessLocked(func() error {
+ _, err := r.reloadIfChanged(false)
+ return err
+ }); err != nil {
+ return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+ }
+ }
+
+ // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
+ // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+ // protects us against in-process writers modifying data.
+ // In the presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+ // and 2) access to the in-memory data is not racy;
+ // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+ r.inProcessLock.RLock()
+ }
+ }
+
+ unlockFn = nil
+ return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *imageStore) startReading() error {
+ return r.startReadingWithReload(true)
+}
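+
+// An illustrative calling pattern (a sketch, not part of this change):
+//
+//	if err := r.startReading(); err != nil {
+//		return err
+//	}
+//	defer r.stopReading()
+//	images, err := r.Images() // entries are copies, so they remain valid after stopReading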
+
+// stopReading releases locks obtained by startReading.
+func (r *imageStore) stopReading() {
+ r.inProcessLock.RUnlock()
+ r.lockfile.Unlock()
+}
+
+// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *imageStore) modified() (lockfile.LastWrite, bool, error) {
+ return r.lockfile.ModifiedSince(r.lastWrite)
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+ lastWrite, modified, err := r.modified()
+ if err != nil {
+ return false, err
+ }
+ // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
+ // and modify no fields, to ensure they see fresh data:
+ // r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
+ // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
+ // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
+ if modified {
+ if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+ return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+ }
+ r.lastWrite = lastWrite
+ }
+ return false, nil
+}
+
+// Requires startReading or startWriting.
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -194,6 +401,41 @@ func (r *imageStore) Images() ([]Image, error) {
return images, nil
}
+// GarbageCollect looks for datadirs in the store directory that are not
+// referenced by the JSON file and removes them. Such leftovers can occur
+// after an unclean shutdown.
+// Requires startReading or startWriting.
+func (r *imageStore) GarbageCollect() error {
+ entries, err := os.ReadDir(r.dir)
+ if err != nil {
+ // Unexpected, don't try any GC
+ return err
+ }
+
+ for _, entry := range entries {
+ id := entry.Name()
+ // Does it look like a datadir directory?
+ if !entry.IsDir() || stringid.ValidateID(id) != nil {
+ continue
+ }
+
+ // Should the id be there?
+ if r.byid[id] != nil {
+ continue
+ }
+
+ // Otherwise remove datadir
+ logrus.Debugf("removing %q", filepath.Join(r.dir, id))
+ moreErr := os.RemoveAll(filepath.Join(r.dir, id))
+ // Propagate first error
+ if moreErr != nil && err == nil {
+ err = moreErr
+ }
+ }
+
+ return err
+}
+
func (r *imageStore) imagespath() string {
return filepath.Join(r.dir, "images.json")
}
@@ -216,12 +458,13 @@ func bigDataNameIsManifest(name string) bool {
// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
+// The caller must hold r.inProcessLock for writing.
func (i *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
if i.Digest != "" {
if err := i.Digest.Validate(); err != nil {
- return errors.Wrapf(err, "error validating image digest %q", string(i.Digest))
+ return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err)
}
digests[i.Digest] = struct{}{}
validDigests = append(validDigests, i.Digest)
@@ -230,8 +473,8 @@ func (i *Image) recomputeDigests() error {
if !bigDataNameIsManifest(name) {
continue
}
- if digest.Validate() != nil {
- return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
+ if err := digest.Validate(); err != nil {
+ return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
@@ -246,122 +489,166 @@ func (i *Image) recomputeDigests() error {
return nil
}
-func (r *imageStore) Load() error {
- shouldSave := false
+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *imageStore) load(lockedForWriting bool) (bool, error) {
rpath := r.imagespath()
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
- return err
+ return false, err
}
+
images := []*Image{}
- idlist := []string{}
+ if len(data) != 0 {
+ if err := json.Unmarshal(data, &images); err != nil {
+ return false, fmt.Errorf("loading %q: %w", rpath, err)
+ }
+ }
+ idlist := make([]string, 0, len(images))
ids := make(map[string]*Image)
names := make(map[string]*Image)
digests := make(map[digest.Digest][]*Image)
- if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
- idlist = make([]string, 0, len(images))
- for n, image := range images {
- ids[image.ID] = images[n]
- idlist = append(idlist, image.ID)
- for _, name := range image.Names {
- if conflict, ok := names[name]; ok {
- r.removeName(conflict, name)
- shouldSave = true
- }
- }
- // Compute the digest list.
- err = image.recomputeDigests()
- if err != nil {
- return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
- }
- for _, name := range image.Names {
- names[name] = image
- }
- for _, digest := range image.Digests {
- list := digests[digest]
- digests[digest] = append(list, image)
+ var errorToResolveBySaving error // == nil
+ for n, image := range images {
+ ids[image.ID] = images[n]
+ idlist = append(idlist, image.ID)
+ for _, name := range image.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ errorToResolveBySaving = ErrDuplicateImageNames
}
- image.ReadOnly = !r.IsReadWrite()
}
+ // Compute the digest list.
+ if err := image.recomputeDigests(); err != nil {
+ return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
+ }
+ for _, name := range image.Names {
+ names[name] = image
+ }
+ for _, digest := range image.Digests {
+ list := digests[digest]
+ digests[digest] = append(list, image)
+ }
+ image.ReadOnly = !r.lockfile.IsReadWrite()
}
- if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
- return ErrDuplicateImageNames
+
+ if errorToResolveBySaving != nil {
+ if !r.lockfile.IsReadWrite() {
+ return false, errorToResolveBySaving
+ }
+ if !lockedForWriting {
+ return true, errorToResolveBySaving
+ }
}
r.images = images
- r.idindex = truncindex.NewTruncIndex(idlist)
+ r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.byname = names
r.bydigest = digests
- if shouldSave {
- return r.Save()
+ if errorToResolveBySaving != nil {
+ return false, r.Save()
}
- return nil
+ return false, nil
}
+// Save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *imageStore) Save() error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
- }
- if !r.Locked() {
- return errors.New("image store is not locked for writing")
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
+ r.lockfile.AssertLockedForWriting()
rpath := r.imagespath()
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
jdata, err := json.Marshal(&r.images)
if err != nil {
return err
}
- defer r.Touch()
- return ioutils.AtomicWriteFile(rpath, jdata, 0600)
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite()
+ if err != nil {
+ return err
+ }
+ r.lastWrite = lw
+ if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil {
+ return err
+ }
+ return nil
}
-func newImageStore(dir string) (ImageStore, error) {
- if err := os.MkdirAll(dir, 0700); err != nil {
+func newImageStore(dir string) (rwImageStore, error) {
+ if err := os.MkdirAll(dir, 0o700); err != nil {
return nil, err
}
- lockfile, err := GetLockfile(filepath.Join(dir, "images.lock"))
+ lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
- lockfile.Lock()
- defer lockfile.Unlock()
istore := imageStore{
lockfile: lockfile,
dir: dir,
+
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
bydigest: make(map[digest.Digest][]*Image),
}
- if err := istore.Load(); err != nil {
+ if err := istore.startWritingWithReload(false); err != nil {
+ return nil, err
+ }
+ defer istore.stopWriting()
+ istore.lastWrite, err = istore.lockfile.GetLastWrite()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := istore.load(true); err != nil {
return nil, err
}
return &istore, nil
}
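+
+// Both constructors pass canReload=false because r.lastWrite is not yet
+// initialized; they record the current LastWrite and then call load() once
+// while still holding the lock taken above.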
-func newROImageStore(dir string) (ROImageStore, error) {
- lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
+func newROImageStore(dir string) (roImageStore, error) {
+ lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
- lockfile.RLock()
- defer lockfile.Unlock()
istore := imageStore{
lockfile: lockfile,
dir: dir,
+
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
bydigest: make(map[digest.Digest][]*Image),
}
- if err := istore.Load(); err != nil {
+ if err := istore.startReadingWithReload(false); err != nil {
+ return nil, err
+ }
+ defer istore.stopReading()
+ istore.lastWrite, err = istore.lockfile.GetLastWrite()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := istore.load(false); err != nil {
return nil, err
}
return &istore, nil
}
+// Requires startReading or startWriting.
func (r *imageStore) lookup(id string) (*Image, bool) {
if image, ok := r.byid[id]; ok {
return image, ok
@@ -374,25 +661,27 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
return nil, false
}
+// Requires startWriting.
func (r *imageStore) ClearFlag(id string, flag string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
delete(image.Flags, flag)
return r.Save()
}
+// Requires startWriting.
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
@@ -401,9 +690,10 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return r.Save()
}
-func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
- if !r.IsReadWrite() {
- return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
+// Requires startWriting.
+func (r *imageStore) create(id string, names []string, layer string, options ImageOptions) (image *Image, err error) {
+ if !r.lockfile.IsReadWrite() {
+ return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if id == "" {
id = stringid.GenerateRandomID()
@@ -414,59 +704,85 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
- return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
+ return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID)
}
- names = dedupeNames(names)
+ names = dedupeStrings(names)
for _, name := range names {
if image, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
+ return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
}
}
- if created.IsZero() {
- created = time.Now().UTC()
+ image = &Image{
+ ID: id,
+ Digest: options.Digest,
+ Digests: dedupeDigests(options.Digests),
+ Names: names,
+ NamesHistory: copySlicePreferringNil(options.NamesHistory),
+ TopLayer: layer,
+ Metadata: options.Metadata,
+ BigDataNames: []string{},
+ BigDataSizes: make(map[string]int64),
+ BigDataDigests: make(map[string]digest.Digest),
+ Created: options.CreationDate,
+ Flags: newMapFrom(options.Flags),
+ }
+ if image.Created.IsZero() {
+ image.Created = time.Now().UTC()
+ }
+ err = image.recomputeDigests()
+ if err != nil {
+ return nil, fmt.Errorf("validating digests for new image: %w", err)
+ }
+ r.images = append(r.images, image)
+ // This can only fail on duplicate IDs, which shouldn’t happen — and in
+ // that case the index is already in the desired state anyway.
+ // Implementing recovery from an unlikely and unimportant failure here
+ // would be too risky.
+ _ = r.idindex.Add(id)
+ r.byid[id] = image
+ for _, name := range names {
+ r.byname[name] = image
}
- if err == nil {
- image = &Image{
- ID: id,
- Digest: searchableDigest,
- Digests: nil,
- Names: names,
- TopLayer: layer,
- Metadata: metadata,
- BigDataNames: []string{},
- BigDataSizes: make(map[string]int64),
- BigDataDigests: make(map[string]digest.Digest),
- Created: created,
- Flags: make(map[string]interface{}),
- }
- err := image.recomputeDigests()
+ for _, digest := range image.Digests {
+ list := r.bydigest[digest]
+ r.bydigest[digest] = append(list, image)
+ }
+ defer func() {
if err != nil {
- return nil, errors.Wrapf(err, "error validating digests for new image")
+ // now that the in-memory structures know about the new
+ // record, we can use regular Delete() to clean up if
+ // anything breaks from here on out
+ if e := r.Delete(id); e != nil {
+ logrus.Debugf("while cleaning up partially-created image %q we failed to create: %v", id, e)
+ }
}
- r.images = append(r.images, image)
- r.idindex.Add(id)
- r.byid[id] = image
- for _, name := range names {
- r.byname[name] = image
+ }()
+ err = r.Save()
+ if err != nil {
+ return nil, err
+ }
+ for _, item := range options.BigData {
+ if item.Digest == "" {
+ item.Digest = digest.Canonical.FromBytes(item.Data)
}
- for _, digest := range image.Digests {
- list := r.bydigest[digest]
- r.bydigest[digest] = append(list, image)
+ if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil {
+ return nil, err
}
- err = r.Save()
- image = copyImage(image)
}
+ image = copyImage(image)
return image, err
}
+// Requires startWriting.
func (r *imageStore) addMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startWriting.
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
initialLen := len(image.MappedTopLayers)
@@ -477,93 +793,97 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error {
}
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
}
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startWriting.
func (r *imageStore) SetMetadata(id, metadata string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// The caller must hold r.inProcessLock for writing.
func (r *imageStore) removeName(image *Image, name string) {
image.Names = stringSliceWithoutValue(image.Names, name)
}
+// The caller must hold r.inProcessLock for writing.
func (i *Image) addNameToHistory(name string) {
- i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
+ i.NamesHistory = dedupeStrings(append([]string{name}, i.NamesHistory...))
}
-func (r *imageStore) SetNames(id string, names []string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
+// Requires startWriting.
+func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
- names = dedupeNames(names)
- if image, ok := r.lookup(id); ok {
- for _, name := range image.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherImage, ok := r.byname[name]; ok {
- r.removeName(otherImage, name)
- }
- r.byname[name] = image
- image.addNameToHistory(name)
+ image, ok := r.lookup(id)
+ if !ok {
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+ }
+ oldNames := image.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherImage, ok := r.byname[name]; ok {
+ r.removeName(otherImage, name)
}
- image.Names = names
- return r.Save()
+ r.byname[name] = image
+ image.addNameToHistory(name)
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ image.Names = names
+ return r.Save()
}
+// Requires startWriting.
func (r *imageStore) Delete(id string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
id = image.ID
- toDeleteIndex := -1
- for i, candidate := range r.images {
- if candidate.ID == id {
- toDeleteIndex = i
- }
- }
delete(r.byid, id)
- r.idindex.Delete(id)
+ // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+ // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
+ _ = r.idindex.Delete(id)
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
- prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+ prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
+ return i == image
+ })
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
- if toDeleteIndex != -1 {
- // delete the image at toDeleteIndex
- if toDeleteIndex == len(r.images)-1 {
- r.images = r.images[:len(r.images)-1]
- } else {
- r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
- }
- }
+ r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
+ return candidate.ID == id
+ })
if err := r.Save(); err != nil {
return err
}
@@ -573,55 +893,50 @@ func (r *imageStore) Delete(id string) error {
return nil
}
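
The rewritten Delete is deliberately tolerant (the ignored idindex.Delete error can only mean the index is already in the desired state) and replaces the old find-then-splice loop with slices.DeleteFunc from the Go 1.21+ standard library. A minimal illustration of that stdlib call:

package main

import (
	"fmt"
	"slices"
)

type image struct{ id string }

func main() {
	images := []*image{{id: "a"}, {id: "b"}, {id: "a"}}
	// DeleteFunc filters in place and returns the shortened slice; the old
	// code searched for the index manually and spliced around it.
	images = slices.DeleteFunc(images, func(i *image) bool { return i.id == "a" })
	fmt.Println(len(images), images[0].id) // 1 b
}
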
+// Requires startReading or startWriting.
func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return copyImage(image), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
-}
-
-func (r *imageStore) Lookup(name string) (id string, err error) {
- if image, ok := r.lookup(name); ok {
- return image.ID, nil
- }
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
+// Requires startReading or startWriting.
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return copyImageSlice(images), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d)
+ return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- return ioutil.ReadFile(r.datapath(image.ID, key))
+ return os.ReadFile(r.datapath(image.ID, key))
}
+// Requires startReading or startWriting.
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- if image.BigDataSizes == nil {
- image.BigDataSizes = make(map[string]int64)
- }
- if size, ok := image.BigDataSizes[key]; ok {
+ if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -630,69 +945,64 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
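
The deleted nil-map initialization is unnecessary because Go defines reads from a nil map: they behave like reads from an empty map, returning the zero value and ok == false. Only writes to a nil map panic. For instance:

package main

import "fmt"

func main() {
	var sizes map[string]int64 // nil: never initialized, never written
	size, ok := sizes["manifest"]
	fmt.Println(size, ok) // 0 false; reading a nil map is safe, writing panics
}
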
+// Requires startReading or startWriting.
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
- }
- if image.BigDataDigests == nil {
- image.BigDataDigests = make(map[string]digest.Digest)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- if d, ok := image.BigDataDigests[key]; ok {
+ if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
return "", ErrDigestUnknown
}
+// Requires startReading or startWriting.
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
- }
- return copyStringSlice(image.BigDataNames), nil
-}
-
-func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
- modified := make([]*Image, 0, len(slice))
- for _, v := range slice {
- if v == value {
- continue
- }
- modified = append(modified, v)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- return modified
+ return copySlicePreferringNil(image.BigDataNames), nil
}
+// Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
- if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
- }
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
- }
- err := os.MkdirAll(r.datadir(image.ID), 0700)
- if err != nil {
- return err
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+ var err error
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if digestManifest == nil {
- return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+ return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown)
}
if newDigest, err = digestManifest(data); err != nil {
- return errors.Wrapf(err, "error digesting manifest")
+ return fmt.Errorf("digesting manifest: %w", err)
}
} else {
newDigest = digest.Canonical.FromBytes(data)
}
- err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
+ return r.setBigData(image, key, data, newDigest)
+}
+
+// Requires startWriting.
+func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest digest.Digest) error {
+ if key == "" {
+ return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
+ }
+ err := os.MkdirAll(r.datadir(image.ID), 0o700)
+ if err != nil {
+ return err
+ }
+ err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600)
if err == nil {
save := false
if image.BigDataSizes == nil {
@@ -708,21 +1018,16 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
- addName := true
- for _, name := range image.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(image.BigDataNames, key) {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
- prunedList := imageSliceWithoutValue(list, image)
+ prunedList := slices.DeleteFunc(list, func(i *Image) bool {
+ return i == image
+ })
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
@@ -731,15 +1036,13 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
}
}
if err = image.recomputeDigests(); err != nil {
- return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
+ return fmt.Errorf("loading recomputing image digest information for %s: %w", image.ID, err)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
- if len(list) == len(imageSliceWithoutValue(list, image)) {
- // the list isn't shortened by trying to prune this image from it,
- // so it's not in there yet
+ if !slices.Contains(list, image) {
r.bydigest[newDigest] = append(list, image)
}
}
@@ -750,9 +1053,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
return err
}
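
The bydigest maintenance above follows a fixed pattern: prune the image from every bucket keyed by its old digests (deleting buckets that become empty), then append it to the buckets for its new digests unless it is already there. A self-contained sketch of that bookkeeping, with plain strings standing in for digest.Digest:

package main

import (
	"fmt"
	"slices"
)

type image struct{ id string }

// reindex moves img from its old digest buckets to its new ones, pruning
// empty buckets, mirroring the bookkeeping in setBigData.
func reindex(bydigest map[string][]*image, img *image, oldDigests, newDigests []string) {
	for _, d := range oldDigests {
		pruned := slices.DeleteFunc(bydigest[d], func(i *image) bool { return i == img })
		if len(pruned) == 0 {
			delete(bydigest, d)
		} else {
			bydigest[d] = pruned
		}
	}
	for _, d := range newDigests {
		if !slices.Contains(bydigest[d], img) {
			bydigest[d] = append(bydigest[d], img)
		}
	}
}

func main() {
	img := &image{id: "i1"}
	idx := map[string][]*image{"sha256:old": {img}}
	reindex(idx, img, []string{"sha256:old"}, []string{"sha256:new"})
	fmt.Println(len(idx["sha256:old"]), len(idx["sha256:new"])) // 0 1
}
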
+// Requires startWriting.
func (r *imageStore) Wipe() error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
@@ -765,50 +1069,3 @@ func (r *imageStore) Wipe() error {
}
return nil
}
-
-func (r *imageStore) Lock() {
- r.lockfile.Lock()
-}
-
-func (r *imageStore) RecursiveLock() {
- r.lockfile.RecursiveLock()
-}
-
-func (r *imageStore) RLock() {
- r.lockfile.RLock()
-}
-
-func (r *imageStore) Unlock() {
- r.lockfile.Unlock()
-}
-
-func (r *imageStore) Touch() error {
- return r.lockfile.Touch()
-}
-
-func (r *imageStore) Modified() (bool, error) {
- return r.lockfile.Modified()
-}
-
-func (r *imageStore) IsReadWrite() bool {
- return r.lockfile.IsReadWrite()
-}
-
-func (r *imageStore) TouchedSince(when time.Time) bool {
- return r.lockfile.TouchedSince(when)
-}
-
-func (r *imageStore) Locked() bool {
- return r.lockfile.Locked()
-}
-
-func (r *imageStore) ReloadIfChanged() error {
- r.loadMut.Lock()
- defer r.loadMut.Unlock()
-
- modified, err := r.Modified()
- if err == nil && modified {
- return r.Load()
- }
- return err
-}
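
The removed public locking methods are replaced throughout this diff by the unexported startReading/startWriting pairs. The intended call pattern, as the interface comments describe it, looks roughly like the sketch below; the reader interface mirrors the contract from the diff, while stubStore and metadataOf are hypothetical stand-ins so the example runs:

package main

import "fmt"

// reader mirrors the startReading/stopReading contract used in this diff.
type reader interface {
	startReading() error
	stopReading()
	Metadata(id string) (string, error)
}

type stubStore struct{}

func (stubStore) startReading() error                { return nil }
func (stubStore) stopReading()                       {}
func (stubStore) Metadata(id string) (string, error) { return "{}", nil }

// metadataOf shows the intended call pattern: lock and refresh via
// startReading, and defer stopReading only once that has succeeded.
func metadataOf(s reader, id string) (string, error) {
	if err := s.startReading(); err != nil {
		return "", err
	}
	defer s.stopReading()
	return s.Metadata(id)
}

func main() {
	md, _ := metadataOf(stubStore{}, "some-id")
	fmt.Println(md)
}
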
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 9b05474bb..6fe1a0803 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -2,13 +2,15 @@ package storage
import (
"bytes"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "maps"
"os"
"path"
"path/filepath"
"reflect"
+ "slices"
"sort"
"strings"
"sync"
@@ -18,15 +20,17 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/tarlog"
"github.com/containers/storage/pkg/truncindex"
+ multierror "github.com/hashicorp/go-multierror"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
- "github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
+ "github.com/opencontainers/selinux/go-selinux"
+ "github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
@@ -35,8 +39,28 @@ import (
const (
tarSplitSuffix = ".tar-split.gz"
incompleteFlag = "incomplete"
+ // maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
+ // in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
+ // until we just give up.
+ maxLayerStoreCleanupIterations = 3
)
+type layerLocations uint8
+
+// The backing store is split into two JSON files: the volatile one is
+// written without fsync(), so it is less robust across an unclean
+// shutdown.
+const (
+ stableLayerLocation layerLocations = 1 << iota
+ volatileLayerLocation
+
+ numLayerLocationIndex = iota
+)
+
+func layerLocationFromIndex(index int) layerLocations {
+ return 1 << index
+}
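
Because stableLayerLocation and volatileLayerLocation are distinct single bits, a layerLocations value doubles as a set: locations can be OR-ed together and tested against a mask, which is how saveLayers further down decides which JSON files to rewrite. A quick round trip between indexes and bits:

package main

import "fmt"

type layerLocations uint8

const (
	stableLayerLocation layerLocations = 1 << iota // 0b01
	volatileLayerLocation                          // 0b10
	numLayerLocationIndex = iota                   // 2
)

func main() {
	mask := stableLayerLocation | volatileLayerLocation
	for i := 0; i < numLayerLocationIndex; i++ {
		loc := layerLocations(1 << i) // same mapping as layerLocationFromIndex
		fmt.Println(i, mask&loc != 0) // both bits are set in the mask
	}
}
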
+
// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
@@ -61,13 +85,26 @@ type Layer struct {
MountLabel string `json:"mountlabel,omitempty"`
// MountPoint is the path where the layer is mounted, or where it was most
- // recently mounted. This can change between subsequent Unmount() and
- // Mount() calls, so the caller should consult this value after Mount()
- // succeeds to find the location of the container's root filesystem.
+ // recently mounted.
+ //
+ // WARNING: This field is a snapshot in time: (except for users inside c/storage that
+ // hold the mount lock) the true value can change between subsequent
+ // calls to the c/storage API.
+ //
+ // Users that need to handle concurrent mount/unmount attempts should not access this
+ // field at all, and should only use the path returned by .Mount() (and that’s only
+ // assuming no other user will concurrently decide to unmount that mount point).
MountPoint string `json:"-"`
// MountCount is used as a reference count for the container's layer being
// mounted at the mount point.
+ //
+ // WARNING: This field is a snapshot in time; (except for users inside c/storage that
+ // hold the mount lock) the true value can change between subsequent
+ // calls to the c/storage API.
+ //
+ // In situations where concurrent mount/unmount attempts can happen, this field
+ // should not be used for any decisions, maybe apart from heuristic user warnings.
MountCount int `json:"-"`
// Created is the datestamp for when this layer was created. Older
@@ -77,33 +114,43 @@ type Layer struct {
Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to
- // ApplyDiff() or Put(), as it was presented to us.
+ // ApplyDiff() or create(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to
- // ApplyDiff() or Put(), as it was presented to us. If
+ // ApplyDiff() or create(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an
// uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to
- // ApplyDiff() or Put(), after we decompressed it. Often referred to
+ // ApplyDiff() or create(), after we decompressed it. Often referred to
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
+ // TOCDigest represents the digest of the Table of Contents (TOC) of the blob.
+ // This digest is utilized when the UncompressedDigest is not
+ // validated during the partial image pull process, but the
+ // TOC itself is validated.
+ // It serves as an alternative reference under these specific conditions.
+ TOCDigest digest.Digest `json:"toc-digest,omitempty"`
+
// UncompressedSize is the length of the blob that was last passed to
- // ApplyDiff() or Put(), after we decompressed it. If
- // UncompressedDigest is not set, this should be treated as if it were
- // an uninitialized value.
+ // ApplyDiff() or create(), after we decompressed it.
+ //
+ // - If UncompressedDigest is set, this must be set to a valid value.
+ // - Otherwise, if TOCDigest is set, this is either valid or -1.
+ // - If neither of these digests is set, this should be treated as if it were
+ // an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
- // that was last passed to ApplyDiff() or Put().
+ // that was last passed to ApplyDiff() or create().
CompressionType archive.Compression `json:"compression,omitempty"`
// UIDs and GIDs are lists of UIDs and GIDs used in the layer. This
// field is only populated (i.e., will only contain one or more
- // entries) if the layer was created using ApplyDiff() or Put().
+ // entries) if the layer was created using ApplyDiff() or create().
UIDs []uint32 `json:"uidset,omitempty"`
GIDs []uint32 `json:"gidset,omitempty"`
@@ -118,6 +165,9 @@ type Layer struct {
// ReadOnly is true if this layer resides in a read-only layer store.
ReadOnly bool `json:"-"`
+ // volatileStore is true if the layer is from the volatile JSON file
+ volatileStore bool `json:"-"`
+
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
// memory when being read from or written to disk.
@@ -136,13 +186,26 @@ type DiffOptions struct {
Compression *archive.Compression
}
-// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// stagedLayerOptions are the options passed to .create to populate a staged
+// layer
+type stagedLayerOptions struct {
+ DiffOutput *drivers.DriverWithDifferOutput
+ DiffOptions *drivers.ApplyDiffWithDifferOpts
+}
+
+// roLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
-type ROLayerStore interface {
- ROFileBasedStore
- ROMetadataStore
- ROLayerBigDataStore
+type roLayerStore interface {
+ roMetadataStore
+ roLayerBigDataStore
+
+ // startReading makes sure the store is fresh, and locks it for reading.
+ // If this succeeds, the caller MUST call stopReading().
+ startReading() error
+
+ // stopReading releases locks obtained by startReading.
+ stopReading()
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
@@ -176,10 +239,6 @@ type ROLayerStore interface {
// found, it returns an error.
Size(name string) (int64, error)
- // Lookup attempts to translate a name to an ID. Most methods do this
- // implicitly.
- Lookup(name string) (string, error)
-
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
@@ -188,38 +247,42 @@ type ROLayerStore interface {
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+ // LayersByTOCDigest returns a slice of the layers with the
+ // specified TOC digest value recorded for them.
+ LayersByTOCDigest(d digest.Digest) ([]Layer, error)
+
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
-// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// rwLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
-type LayerStore interface {
- ROLayerStore
- RWFileBasedStore
- RWMetadataStore
- FlaggableStore
- RWLayerBigDataStore
-
- // Create creates a new layer, optionally giving it a specified ID rather than
+type rwLayerStore interface {
+ roLayerStore
+ rwMetadataStore
+ flaggableStore
+ rwLayerBigDataStore
+
+ // startWriting makes sure the store is fresh, and locks it for writing.
+ // If this succeeds, the caller MUST call stopWriting().
+ startWriting() error
+
+ // stopWriting releases locks obtained by startWriting.
+ stopWriting()
+
+ // create creates a new layer, optionally giving it a specified ID rather than
// a randomly-generated one, either inheriting data from another specified
// layer or the empty base layer. The new layer can optionally be given names
// and have an SELinux label specified for use when mounting it. Some
// underlying drivers can accept a "size" option. At this time, most
// underlying drivers do not themselves distinguish between writeable
- // and read-only layers.
- Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
-
- // CreateWithFlags combines the functions of Create and SetFlag.
- CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
+ // and read-only layers. Returns the new layer structure and the size of the
+ // diff which was applied to its parent to initialize its contents.
+ create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
- // Put combines the functions of CreateWithFlags and ApplyDiff.
- Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
-
- // SetNames replaces the list of names associated with a layer with the
- // supplied values.
- SetNames(id string, names []string) error
+ // updateNames modifies names associated with a layer based on (op, names).
+ updateNames(id string, names []string, op updateNameOperation) error
// Delete deletes a layer with the specified name or ID.
Delete(id string) error
@@ -233,8 +296,15 @@ type LayerStore interface {
// The mappings used by the container can be specified.
Mount(id string, options drivers.MountOpts) (string, error)
- // Unmount unmounts a layer when it is no longer in use.
- Unmount(id string, force bool) (bool, error)
+ // unmount unmounts a layer when it is no longer in use.
+ // If conditional is set, it will fail with ErrLayerNotMounted if the layer is not mounted (without conditional, the caller is
+ // making a promise that the layer is actually mounted).
+ // If force is set, it will physically try to unmount it even if it is mounted multiple times, or even if (!conditional and)
+ // there are no records of it being mounted in the first place.
+ // It returns whether the layer was still mounted at the time this function returned.
+ // WARNING: The return value may already be obsolete by the time it is available
+ // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
+ unmount(id string, force bool, conditional bool) (bool, error)
// Mounted returns number of times the layer has been mounted.
Mounted(id string) (int, error)
@@ -247,51 +317,129 @@ type LayerStore interface {
// applies its changes to a specified layer.
ApplyDiff(to string, diff io.Reader) (int64, error)
- // ApplyDiffWithDiffer applies the changes through the differ callback function.
- // If to is the empty string, then a staging directory is created by the driver.
- ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+ // applyDiffWithDifferNoLock applies the changes through the differ callback function.
+ applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
- // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
- ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+ // applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
+ applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
- // LoadLocked wraps Load in a locked state. This means it loads the store
- // and cleans-up invalid layers if needed.
- LoadLocked() error
-
// PutAdditionalLayer creates a layer using the diff contained in the additional layer
// store.
// This API is experimental and can be changed without bumping the major version number.
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
+
+ // Clean up unreferenced layers
+ GarbageCollect() error
+}
+
+type multipleLockFile struct {
+ lockfiles []*lockfile.LockFile
+}
+
+func (l multipleLockFile) Lock() {
+ for _, lock := range l.lockfiles {
+ lock.Lock()
+ }
+}
+
+func (l multipleLockFile) RLock() {
+ for _, lock := range l.lockfiles {
+ lock.RLock()
+ }
+}
+
+func (l multipleLockFile) Unlock() {
+ for _, lock := range l.lockfiles {
+ lock.Unlock()
+ }
+}
+
+func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
+ // Look up only the first lockfile, since this is the value returned by RecordWrite().
+ return l.lockfiles[0].ModifiedSince(lastWrite)
+}
+
+func (l multipleLockFile) AssertLockedForWriting() {
+ for _, lock := range l.lockfiles {
+ lock.AssertLockedForWriting()
+ }
+}
+
+func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
+ return l.lockfiles[0].GetLastWrite()
+}
+
+func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
+ var lastWrite *lockfile.LastWrite
+ for _, lock := range l.lockfiles {
+ lw, err := lock.RecordWrite()
+ if err != nil {
+ return lw, err
+ }
+ // Return the first value we get so we know that
+ // all the locks have a write time >= to this one.
+ if lastWrite == nil {
+ lastWrite = &lw
+ }
+ }
+ return *lastWrite, nil
+}
+
+func (l multipleLockFile) IsReadWrite() bool {
+ return l.lockfiles[0].IsReadWrite()
+}
+
+func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
+ return &multipleLockFile{lockfiles: l}
}
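
multipleLockFile assumes that every holder locks the same lockfiles in the same slice order; a fixed global acquisition order is what rules out deadlock when two parties need both files. An in-process analogue of the idea, with sync.Mutex standing in for *lockfile.LockFile:

package main

import (
	"fmt"
	"sync"
)

type multiLock struct{ locks []*sync.Mutex }

// Lock acquires every lock in slice order. All users must share the same
// order; otherwise two holders could each own one lock and wait on the other.
func (m multiLock) Lock() {
	for _, l := range m.locks {
		l.Lock()
	}
}

// Unlock releases them; release order does not affect deadlock-freedom.
func (m multiLock) Unlock() {
	for _, l := range m.locks {
		l.Unlock()
	}
}

func main() {
	a, b := &sync.Mutex{}, &sync.Mutex{}
	m := multiLock{locks: []*sync.Mutex{a, b}}
	m.Lock()
	m.Unlock()
	fmt.Println("acquired and released in one fixed order")
}
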
type layerStore struct {
- lockfile Locker
- mountsLockfile Locker
- rundir string
- driver drivers.Driver
- layerdir string
- layers []*Layer
- idindex *truncindex.TruncIndex
- byid map[string]*Layer
- byname map[string]*Layer
- bymount map[string]*Layer
- bycompressedsum map[digest.Digest][]string
- byuncompressedsum map[digest.Digest][]string
- uidMap []idtools.IDMap
- gidMap []idtools.IDMap
- loadMut sync.Mutex
+ // The following fields are only set when constructing layerStore, and must never be modified afterwards.
+ // They are safe to access without any other locking.
+ lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
+ mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
+ rundir string
+ jsonPath [numLayerLocationIndex]string
+ layerdir string
+
+ inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+ // The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+ // Almost all users should use startReading() or startWriting().
+ lastWrite lockfile.LastWrite
+ mountsLastWrite lockfile.LastWrite // Only valid if lockfile.IsReadWrite()
+ layers []*Layer
+ idindex *truncindex.TruncIndex
+ byid map[string]*Layer
+ byname map[string]*Layer
+ bymount map[string]*Layer
+ bycompressedsum map[digest.Digest][]string
+ byuncompressedsum map[digest.Digest][]string
+ bytocsum map[digest.Digest][]string
+ layerspathsModified [numLayerLocationIndex]time.Time
+
+ // FIXME: This field is only set when constructing layerStore, but locking rules of the driver
+ // interface itself are not documented here.
+ driver drivers.Driver
+}
+
+// The caller must hold r.inProcessLock for reading.
+func layerLocation(l *Layer) layerLocations {
+ if l.volatileStore {
+ return volatileLayerLocation
+ }
+ return stableLayerLocation
}
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
- Names: copyStringSlice(l.Names),
+ Names: copySlicePreferringNil(l.Names),
Parent: l.Parent,
Metadata: l.Metadata,
MountLabel: l.MountLabel,
@@ -302,17 +450,284 @@ func copyLayer(l *Layer) *Layer {
CompressedSize: l.CompressedSize,
UncompressedDigest: l.UncompressedDigest,
UncompressedSize: l.UncompressedSize,
+ TOCDigest: l.TOCDigest,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
- BigDataNames: copyStringSlice(l.BigDataNames),
- Flags: copyStringInterfaceMap(l.Flags),
- UIDMap: copyIDMap(l.UIDMap),
- GIDMap: copyIDMap(l.GIDMap),
- UIDs: copyUint32Slice(l.UIDs),
- GIDs: copyUint32Slice(l.GIDs),
+ volatileStore: l.volatileStore,
+ BigDataNames: copySlicePreferringNil(l.BigDataNames),
+ Flags: copyMapPreferringNil(l.Flags),
+ UIDMap: copySlicePreferringNil(l.UIDMap),
+ GIDMap: copySlicePreferringNil(l.GIDMap),
+ UIDs: copySlicePreferringNil(l.UIDs),
+ GIDs: copySlicePreferringNil(l.GIDs),
+ }
+}
+
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startWriting() instead.
+func (r *layerStore) startWritingWithReload(canReload bool) error {
+ r.lockfile.Lock()
+ r.inProcessLock.Lock()
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+ }
+ }()
+
+ if canReload {
+ if _, err := r.reloadIfChanged(true); err != nil {
+ return err
+ }
}
+
+ succeeded = true
+ return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *layerStore) startWriting() error {
+ return r.startWritingWithReload(true)
}
+// stopWriting releases locks obtained by startWriting.
+func (r *layerStore) stopWriting() {
+ r.inProcessLock.Unlock()
+ r.lockfile.Unlock()
+}
+
+// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startReading() instead.
+func (r *layerStore) startReadingWithReload(canReload bool) error {
+ // inProcessLocked calls the nested function with r.inProcessLock held for writing.
+ inProcessLocked := func(fn func() error) error {
+ r.inProcessLock.Lock()
+ defer r.inProcessLock.Unlock()
+ return fn()
+ }
+
+ r.lockfile.RLock()
+ unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
+ defer func() {
+ if unlockFn != nil {
+ unlockFn()
+ }
+ }()
+ r.inProcessLock.RLock()
+ unlockFn = r.stopReading
+
+ if canReload {
+ // If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
+ modified, err := r.modified()
+ if err != nil {
+ return err
+ }
+ if modified {
+ // We are unlucky, and need to reload.
+ // NOTE: Multiple goroutines can get to this place approximately simultaneously.
+ r.inProcessLock.RUnlock()
+ unlockFn = r.lockfile.Unlock
+
+ cleanupsDone := 0
+ for {
+ // First try reloading with r.lockfile held for reading.
+ // r.inProcessLock will serialize all goroutines that got here;
+ // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
+ var tryLockedForWriting bool
+ err := inProcessLocked(func() error {
+ var err error
+ tryLockedForWriting, err = r.reloadIfChanged(false)
+ return err
+ })
+ if err == nil {
+ break
+ }
+ if !tryLockedForWriting {
+ return err
+ }
+ if cleanupsDone >= maxLayerStoreCleanupIterations {
+ return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
+ }
+ // Not good enough, we need r.lockfile held for writing. So, let’s do that.
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.Lock()
+ unlockFn = r.lockfile.Unlock
+ if err := inProcessLocked(func() error {
+ _, err := r.reloadIfChanged(true)
+ return err
+ }); err != nil {
+ return err
+ }
+ unlockFn()
+ unlockFn = nil
+
+ r.lockfile.RLock()
+ unlockFn = r.lockfile.Unlock
+ // We need to check for a reload again because the on-disk state could have been modified
+ // after we released the lock.
+ cleanupsDone++
+ }
+
+ // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
+ // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+ // protects us against in-process writers modifying data.
+ // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+ // and 2) access to the in-memory data is not racy;
+ // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+ r.inProcessLock.RLock()
+ }
+ }
+
+ unlockFn = nil
+ return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *layerStore) startReading() error {
+ return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *layerStore) stopReading() {
+ r.inProcessLock.RUnlock()
+ r.lockfile.Unlock()
+}
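
startReadingWithReload implements a read-lock-first fast path with a bounded retry loop: if the on-disk state needs repairs that require the write lock, the reader drops its read lock, reacquires for writing, reloads, then goes back to reading and re-checks, because another writer may have slipped in between the unlock and the relock. A schematic of that upgrade-retry shape with the c/storage details elided (refreshForReading and its reload callback are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
)

const maxCleanups = 3 // mirrors maxLayerStoreCleanupIterations

var errNeedsWrite = errors.New("state needs repair; retry with the write lock")

// refreshForReading sketches the upgrade-retry loop: try under the read lock,
// upgrade to the write lock if repairs are needed, then re-check afresh.
// On success it returns while still holding the read lock, like startReading.
func refreshForReading(mu *sync.RWMutex, reload func(lockedForWriting bool) error) error {
	mu.RLock()
	for tries := 0; ; tries++ {
		err := reload(false)
		if err == nil {
			return nil // caller now owns the read lock and must release it
		}
		mu.RUnlock()
		if !errors.Is(err, errNeedsWrite) || tries >= maxCleanups {
			return err
		}
		mu.Lock()
		repairErr := reload(true)
		mu.Unlock()
		if repairErr != nil {
			return repairErr
		}
		mu.RLock() // downgraded; loop to verify nothing changed meanwhile
	}
}

func main() {
	var mu sync.RWMutex
	needsRepair := true
	err := refreshForReading(&mu, func(lockedForWriting bool) error {
		if needsRepair && !lockedForWriting {
			return errNeedsWrite
		}
		needsRepair = false
		return nil
	})
	fmt.Println(err) // <nil>; we hold the read lock now
	mu.RUnlock()
}
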
+
+// modified returns true if the on-disk state (of layers or mounts) has changed (i.e. if reloadIfChanged may need to modify the store).
+//
+// Note that unlike containerStore.modified and imageStore.modified, this function is not directly used in layerStore.reloadIfChanged();
+// it exists only to help the reader ensure it has fresh enough state.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) modified() (bool, error) {
+ _, m, err := r.layersModified()
+ if err != nil {
+ return false, err
+ }
+ if m {
+ return true, nil
+ }
+ if r.lockfile.IsReadWrite() {
+ // This means we get, release, and re-obtain, r.mountsLockfile if we actually need to do any kind of reload.
+ // That’s a bit expensive, but hopefully most callers will be read-only and see no changes.
+ // We can’t eliminate these mountsLockfile accesses given the current assumption that Layer objects have _some_ not-very-obsolete
+ // mount data. Maybe we can segregate the mount-dependent and mount-independent operations better...
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ _, m, err := r.mountsModified()
+ if err != nil {
+ return false, err
+ }
+ if m {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the
+// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful
+// reload.
+// It should only be called with the lock held.
+// The caller must hold r.inProcessLock for reading.
+func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
+ lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
+ if err != nil {
+ return lockfile.LastWrite{}, modified, err
+ }
+ if modified {
+ return lastWrite, true, nil
+ }
+
+ // If the layers.json file or container-layers.json has been
+ // modified manually, then we have to reload the storage in
+ // any case.
+ for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+ info, err := os.Stat(r.jsonPath[locationIndex])
+ if err != nil && !os.IsNotExist(err) {
+ return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
+ }
+ if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] {
+ // In this case the LastWrite value is equal to r.lastWrite; writing it back doesn’t hurt.
+ return lastWrite, true, nil
+ }
+ }
+
+ return lockfile.LastWrite{}, false, nil
+}
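
layersModified combines two staleness signals: the lock file's LastWrite record (for cooperating writers) and the JSON files' mtimes (for out-of-band edits). The mtime half reduces to a comparison like the sketch below, where fileChangedSince is a hypothetical helper and recorded stands for the mtime captured at the last successful load:

package main

import (
	"fmt"
	"os"
	"time"
)

// fileChangedSince reports whether path's mtime differs from the recorded one.
// A missing file counts as unchanged, matching the os.IsNotExist handling above.
func fileChangedSince(path string, recorded time.Time) (bool, error) {
	info, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, fmt.Errorf("stat layers file: %w", err)
	}
	return !info.ModTime().Equal(recorded), nil
}

func main() {
	changed, err := fileChangedSince("/tmp/layers.json", time.Time{})
	fmt.Println(changed, err)
}
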
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+ lastWrite, layersModified, err := r.layersModified()
+ if err != nil {
+ return false, err
+ }
+ if layersModified {
+ // r.load also reloads mounts data; so, on this path, we don’t need to call reloadMountsIfChanged.
+ if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+ return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+ }
+ r.lastWrite = lastWrite
+ return false, nil
+ }
+ if r.lockfile.IsReadWrite() {
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ if err := r.reloadMountsIfChanged(); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+}
+
+// mountsModified returns true if the on-disk mount state has changed (i.e. if reloadMountsIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.mountsLockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) mountsModified() (lockfile.LastWrite, bool, error) {
+ return r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
+}
+
+// reloadMountsIfChanged reloads the contents of mountsPath from disk if it is changed.
+//
+// The caller must hold r.mountsLockFile for reading or writing.
+func (r *layerStore) reloadMountsIfChanged() error {
+ lastWrite, modified, err := r.mountsModified()
+ if err != nil {
+ return err
+ }
+ if modified {
+ if err = r.loadMounts(); err != nil {
+ return err
+ }
+ r.mountsLastWrite = lastWrite
+ }
+ return nil
+}
+
+// Requires startReading or startWriting.
func (r *layerStore) Layers() ([]Layer, error) {
layers := make([]Layer, len(r.layers))
for i := range r.layers {
@@ -321,181 +736,327 @@ func (r *layerStore) Layers() ([]Layer, error) {
return layers, nil
}
+// Requires startWriting.
+func (r *layerStore) GarbageCollect() error {
+ layers, err := r.driver.ListLayers()
+ if err != nil {
+ if errors.Is(err, drivers.ErrNotSupported) {
+ return nil
+ }
+ return err
+ }
+
+ for _, id := range layers {
+ // Is the id still referenced
+ if r.byid[id] != nil {
+ continue
+ }
+
+ // Remove layer and any related data of unreferenced id
+ if err := r.driver.Remove(id); err != nil {
+ logrus.Debugf("removing driver layer %q", id)
+ return err
+ }
+
+ logrus.Debugf("removing %q", r.tspath(id))
+ os.Remove(r.tspath(id))
+ logrus.Debugf("removing %q", r.datadir(id))
+ os.RemoveAll(r.datadir(id))
+ }
+ return nil
+}
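
GarbageCollect treats r.byid as the set of live references and sweeps anything the driver reports that is not in it. Stripped of the driver and filesystem calls, the core is an ordinary sweep over two ID sets; sweep below is an illustrative helper, not c/storage code:

package main

import "fmt"

// sweep returns the driver-known IDs with no entry in the live set,
// i.e. the candidates GarbageCollect would remove.
func sweep(driverIDs []string, live map[string]bool) []string {
	var unreferenced []string
	for _, id := range driverIDs {
		if !live[id] {
			unreferenced = append(unreferenced, id)
		}
	}
	return unreferenced
}

func main() {
	live := map[string]bool{"l1": true}
	fmt.Println(sweep([]string{"l1", "l2"}, live)) // [l2]
}
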
+
func (r *layerStore) mountspath() string {
return filepath.Join(r.rundir, "mountpoints.json")
}
-func (r *layerStore) layerspath() string {
- return filepath.Join(r.layerdir, "layers.json")
-}
+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// As a side effect, this sets r.mountsLastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
+ var modifiedLocations layerLocations
-func (r *layerStore) Load() error {
- shouldSave := false
- rpath := r.layerspath()
- data, err := ioutil.ReadFile(rpath)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
layers := []*Layer{}
- idlist := []string{}
ids := make(map[string]*Layer)
- names := make(map[string]*Layer)
- compressedsums := make(map[digest.Digest][]string)
- uncompressedsums := make(map[digest.Digest][]string)
- if r.IsReadWrite() {
- label.ClearLabels()
- }
- if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
- idlist = make([]string, 0, len(layers))
- for n, layer := range layers {
- ids[layer.ID] = layers[n]
- idlist = append(idlist, layer.ID)
- for _, name := range layer.Names {
- if conflict, ok := names[name]; ok {
- r.removeName(conflict, name)
- shouldSave = true
- }
- names[name] = layers[n]
+
+ for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+ location := layerLocationFromIndex(locationIndex)
+ rpath := r.jsonPath[locationIndex]
+ info, err := os.Stat(rpath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return false, err
}
- if layer.CompressedDigest != "" {
- compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+ } else {
+ r.layerspathsModified[locationIndex] = info.ModTime()
+ }
+ data, err := os.ReadFile(rpath)
+ if err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+
+ locationLayers := []*Layer{}
+ if len(data) != 0 {
+ if err := json.Unmarshal(data, &locationLayers); err != nil {
+ return false, fmt.Errorf("loading %q: %w", rpath, err)
+ }
+ }
+
+ for _, layer := range locationLayers {
+ // There should be no duplicated IDs between the JSON files, but let's check to be sure
+ if ids[layer.ID] != nil {
+ continue // skip invalid duplicated layer
}
- if layer.UncompressedDigest != "" {
- uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+ // Remember where the layer came from
+ if location == volatileLayerLocation {
+ layer.volatileStore = true
}
- if layer.MountLabel != "" {
- label.ReserveLabel(layer.MountLabel)
+ layers = append(layers, layer)
+ ids[layer.ID] = layer
+ }
+ }
+
+ idlist := make([]string, 0, len(layers))
+ names := make(map[string]*Layer)
+ compressedsums := make(map[digest.Digest][]string)
+ uncompressedsums := make(map[digest.Digest][]string)
+ tocsums := make(map[digest.Digest][]string)
+ var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
+ if r.lockfile.IsReadWrite() {
+ selinux.ClearLabels()
+ }
+ for n, layer := range layers {
+ idlist = append(idlist, layer.ID)
+ for _, name := range layer.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ errorToResolveBySaving = ErrDuplicateLayerNames
+ modifiedLocations |= layerLocation(conflict)
}
- layer.ReadOnly = !r.IsReadWrite()
+ names[name] = layers[n]
+ }
+ if layer.CompressedDigest != "" {
+ compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+ }
+ if layer.UncompressedDigest != "" {
+ uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+ }
+ if layer.TOCDigest != "" {
+ tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID)
+ }
+ if layer.MountLabel != "" {
+ selinux.ReserveLabel(layer.MountLabel)
+ }
+ layer.ReadOnly = !r.lockfile.IsReadWrite()
+ // The r.lockfile.IsReadWrite() condition maintains past practice:
+ // Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+ // (OTOH creating child layers on top would probably lead to problems?).
+ // We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+ if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+ errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
}
- err = nil
}
- if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
- return ErrDuplicateLayerNames
+
+ if errorToResolveBySaving != nil {
+ if !r.lockfile.IsReadWrite() {
+ return false, errorToResolveBySaving
+ }
+ if !lockedForWriting {
+ return true, errorToResolveBySaving
+ }
}
r.layers = layers
- r.idindex = truncindex.NewTruncIndex(idlist)
+ r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.byname = names
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
+ r.bytocsum = tocsums
// Load and merge information about which layers are mounted, and where.
- if r.IsReadWrite() {
+ if r.lockfile.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
- if err = r.loadMounts(); err != nil {
- return err
+ // We need to reload mounts unconditionally, because by creating r.layers from scratch, we have discarded the previous
+ // information, if any. So, obtain a fresh mountsLastWrite value so that we don’t unnecessarily reload the data
+ // afterwards.
+ mountsLastWrite, err := r.mountsLockfile.GetLastWrite()
+ if err != nil {
+ return false, err
}
+ if err := r.loadMounts(); err != nil {
+ return false, err
+ }
+ r.mountsLastWrite = mountsLastWrite
+ // NOTE: We will release mountsLockfile when this function returns, so unlike most of the layer data, the
+ // r.layers[].MountPoint, r.layers[].MountCount, and r.bymount values might not reflect
+ // true on-filesystem state already by the time this function returns.
+ // Code that needs the state to be accurate must lock r.mountsLockfile again,
+ // and possibly loadMounts() again.
+ }
- // Last step: as we’re writable, try to remove anything that a previous
+ if errorToResolveBySaving != nil {
+ if !r.lockfile.IsReadWrite() {
+ return false, fmt.Errorf("internal error: layerStore.load has shouldSave but !r.lockfile.IsReadWrite")
+ }
+ // Last step: try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
- if r.Locked() {
- for _, layer := range r.layers {
- if layer.Flags == nil {
- layer.Flags = make(map[string]interface{})
- }
- if cleanup, ok := layer.Flags[incompleteFlag]; ok {
- if b, ok := cleanup.(bool); ok && b {
- err = r.deleteInternal(layer.ID)
- if err != nil {
- break
- }
- shouldSave = true
- }
+ var incompleteDeletionErrors error // = nil
+ for _, layer := range r.layers {
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ if layerHasIncompleteFlag(layer) {
+ logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+ err := r.deleteInternal(layer.ID)
+ if err != nil {
+ // Don't return the error immediately, because deleteInternal does not saveLayers();
+ // Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+ // deleted incomplete layers have their metadata correctly removed.
+ incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+ fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
+ modifiedLocations |= layerLocation(layer)
}
}
- if shouldSave {
- return r.saveLayers()
+ if err := r.saveLayers(modifiedLocations); err != nil {
+ return false, err
+ }
+ if incompleteDeletionErrors != nil {
+ return false, incompleteDeletionErrors
}
}
-
- return err
-}
-
-func (r *layerStore) LoadLocked() error {
- r.lockfile.Lock()
- defer r.lockfile.Unlock()
- return r.Load()
+ return false, nil
}
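
The incomplete-layer cleanup at the end of load deliberately keeps going after individual failures, accumulating them with hashicorp/go-multierror so that saveLayers still runs for the layers that were successfully removed. The accumulation pattern in isolation, with a stubbed deleteLayer:

package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	deleteLayer := func(id string) error {
		if id == "bad" {
			return errors.New("device or resource busy")
		}
		return nil
	}
	var errs error // stays nil if every deletion succeeds
	for _, id := range []string{"ok", "bad"} {
		if err := deleteLayer(id); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("deleting layer %q: %w", id, err))
		}
	}
	// "ok" was still processed even though "bad" failed.
	fmt.Println(errs)
}
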
+// The caller must hold r.mountsLockfile for reading or writing.
+// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
- data, err := ioutil.ReadFile(mpath)
+ data, err := os.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
- if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
- // Clear all of our mount information. If another process
- // unmounted something, it (along with its zero count) won't
- // have been encoded into the version of mountpoints.json that
- // we're loading, so our count could fall out of sync with it
- // if we don't, and if we subsequently change something else,
- // we'd pass that error along to other process that reloaded
- // the data after we saved it.
- for _, layer := range r.layers {
- layer.MountPoint = ""
- layer.MountCount = 0
- }
- // All of the non-zero count values will have been encoded, so
- // we reset the still-mounted ones based on the contents.
- for _, mount := range layerMounts {
- if mount.MountPoint != "" {
- if layer, ok := r.lookup(mount.ID); ok {
- mounts[mount.MountPoint] = layer
- layer.MountPoint = mount.MountPoint
- layer.MountCount = mount.MountCount
- }
+ if len(data) != 0 {
+ if err := json.Unmarshal(data, &layerMounts); err != nil {
+ return err
+ }
+ }
+ // Clear all of our mount information. If another process
+ // unmounted something, it (along with its zero count) won't
+ // have been encoded into the version of mountpoints.json that
+ // we're loading, so our count could fall out of sync with it
+ // if we don't, and if we subsequently change something else,
+ // we'd pass that error along to other process that reloaded
+ // the data after we saved it.
+ for _, layer := range r.layers {
+ layer.MountPoint = ""
+ layer.MountCount = 0
+ }
+ // All of the non-zero count values will have been encoded, so
+ // we reset the still-mounted ones based on the contents.
+ for _, mount := range layerMounts {
+ if mount.MountPoint != "" {
+ if layer, ok := r.lookup(mount.ID); ok {
+ mounts[mount.MountPoint] = layer
+ layer.MountPoint = mount.MountPoint
+ layer.MountCount = mount.MountCount
}
}
- err = nil
}
r.bymount = mounts
- return err
+ return nil
}
-func (r *layerStore) Save() error {
+// save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) save(saveLocations layerLocations) error {
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
- defer r.mountsLockfile.Touch()
- if err := r.saveLayers(); err != nil {
+ if err := r.saveLayers(saveLocations); err != nil {
return err
}
return r.saveMounts()
}
-func (r *layerStore) saveLayers() error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
- }
- if !r.Locked() {
- return errors.New("layer store is not locked for writing")
- }
- rpath := r.layerspath()
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
- return err
+// saveFor saves the contents of the store relevant for modifiedLayer to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) saveFor(modifiedLayer *Layer) error {
+ return r.save(layerLocation(modifiedLayer))
+}
+
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) saveLayers(saveLocations layerLocations) error {
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
- jldata, err := json.Marshal(&r.layers)
+ r.lockfile.AssertLockedForWriting()
+
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
- defer r.Touch()
- return ioutils.AtomicWriteFile(rpath, jldata, 0600)
+ r.lastWrite = lw
+
+ for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+ location := layerLocationFromIndex(locationIndex)
+ if location&saveLocations == 0 {
+ continue
+ }
+ rpath := r.jsonPath[locationIndex]
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
+ return err
+ }
+ subsetLayers := make([]*Layer, 0, len(r.layers))
+ for _, layer := range r.layers {
+ if layerLocation(layer) == location {
+ subsetLayers = append(subsetLayers, layer)
+ }
+ }
+
+ jldata, err := json.Marshal(&subsetLayers)
+ if err != nil {
+ return err
+ }
+ opts := ioutils.AtomicFileWriterOptions{}
+ if location == volatileLayerLocation {
+ opts.NoSync = true
+ }
+ if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil {
+ return err
+ }
+ r.layerspathsModified[locationIndex] = opts.ModTime
+ }
+ return nil
}
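
saveLayers writes each location with ioutils.AtomicWriteFileWithOpts, skipping fsync for the volatile file via opts.NoSync: losing volatile state in a crash is acceptable, while the stable file pays for durability. A stdlib-only sketch of the same write-temp-then-rename idea; atomicWrite and its exact behavior are assumptions for illustration, not the c/storage helper:

package main

import (
	"os"
	"path/filepath"
)

// atomicWrite writes data to a temp file in the same directory and renames it
// over path, so readers never observe a partial file. sync controls whether
// the data is fsync()ed first (stable location) or not (volatile location).
func atomicWrite(path string, data []byte, perm os.FileMode, sync bool) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if sync {
		if err := tmp.Sync(); err != nil {
			tmp.Close()
			return err
		}
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	_ = atomicWrite("/tmp/volatile-layers.json", []byte("[]"), 0o600, false)
}
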
+// The caller must hold r.mountsLockfile for writing.
+// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveMounts() error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
- }
- if !r.mountsLockfile.Locked() {
- return errors.New("layer store mount information is not locked for writing")
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
+ r.mountsLockfile.AssertLockedForWriting()
mpath := r.mountspath()
- if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil {
return err
}
mounts := make([]layerMountPoint, 0, len(r.layers))
@@ -512,66 +1073,129 @@ func (r *layerStore) saveMounts() error {
if err != nil {
return err
}
- if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
+
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.mountsLockfile.RecordWrite()
+ if err != nil {
+ return err
+ }
+ r.mountsLastWrite = lw
+
+ if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil {
return err
}
return r.loadMounts()
}
-func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) {
- if err := os.MkdirAll(rundir, 0700); err != nil {
+func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
+ if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
- if err := os.MkdirAll(layerdir, 0700); err != nil {
+ if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
- lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
+ if imagedir != "" {
+ if err := os.MkdirAll(imagedir, 0o700); err != nil {
+ return nil, err
+ }
+ }
+ // Note: while the containers.lock file lives in rundir for transient stores,
+ // we keep layers.lock in layerdir even then, because the non-transient layers
+ // in layers.json might be used externally as a read-only layer (e.g. via
+ // additionalimagestores), and such a user would look for the lockfile in
+ // layerdir.
+ var lockFiles []*lockfile.LockFile
+ lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
- mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
+ lockFiles = append(lockFiles, lockFile)
+ if imagedir != "" {
+ lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockFiles = append(lockFiles, lockFile)
+ }
+
+ mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
}
+ volatileDir := layerdir
+ if transient {
+ volatileDir = rundir
+ }
rlstore := layerStore{
- lockfile: lockfile,
+ lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
- driver: driver,
rundir: rundir,
- layerdir: layerdir,
- byid: make(map[string]*Layer),
- bymount: make(map[string]*Layer),
- byname: make(map[string]*Layer),
- uidMap: copyIDMap(s.uidMap),
- gidMap: copyIDMap(s.gidMap),
- }
- if err := rlstore.Load(); err != nil {
+ jsonPath: [numLayerLocationIndex]string{
+ filepath.Join(layerdir, "layers.json"),
+ filepath.Join(volatileDir, "volatile-layers.json"),
+ },
+ layerdir: layerdir,
+
+ byid: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+
+ driver: driver,
+ }
+ if err := rlstore.startWritingWithReload(false); err != nil {
+ return nil, err
+ }
+ defer rlstore.stopWriting()
+ lw, err := rlstore.lockfile.GetLastWrite()
+ if err != nil {
+ return nil, err
+ }
+ rlstore.lastWrite = lw
+ // rlstore.mountsLastWrite is initialized inside rlstore.load().
+ if _, err := rlstore.load(true); err != nil {
return nil, err
}
return &rlstore, nil
}
-func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
- lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
+func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) {
+ lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
rlstore := layerStore{
- lockfile: lockfile,
+ lockfile: newMultipleLockFile(lockfile),
mountsLockfile: nil,
- driver: driver,
rundir: rundir,
- layerdir: layerdir,
- byid: make(map[string]*Layer),
- bymount: make(map[string]*Layer),
- byname: make(map[string]*Layer),
+ jsonPath: [numLayerLocationIndex]string{
+ filepath.Join(layerdir, "layers.json"),
+ filepath.Join(layerdir, "volatile-layers.json"),
+ },
+ layerdir: layerdir,
+
+ byid: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+
+ driver: driver,
+ }
+ if err := rlstore.startReadingWithReload(false); err != nil {
+ return nil, err
}
- if err := rlstore.Load(); err != nil {
+ defer rlstore.stopReading()
+ lw, err := rlstore.lockfile.GetLastWrite()
+ if err != nil {
+ return nil, err
+ }
+ rlstore.lastWrite = lw
+ if _, err := rlstore.load(false); err != nil {
return nil, err
}
return &rlstore, nil
}
+// Requires startReading or startWriting.
func (r *layerStore) lookup(id string) (*Layer, bool) {
if layer, ok := r.byid[id]; ok {
return layer, ok
@@ -584,6 +1208,7 @@ func (r *layerStore) lookup(id string) (*Layer, bool) {
return nil, false
}
+// Requires startReading or startWriting.
func (r *layerStore) Size(name string) (int64, error) {
layer, ok := r.lookup(name)
if !ok {
@@ -592,27 +1217,29 @@ func (r *layerStore) Size(name string) (int64, error) {
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
- if layer.UncompressedDigest != "" {
- return layer.UncompressedSize, nil
+ if layer.UncompressedDigest != "" || layer.TOCDigest != "" {
+ return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set
}
return -1, nil
}
+// Requires startWriting.
func (r *layerStore) ClearFlag(id string, flag string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
delete(layer.Flags, flag)
- return r.Save()
+ return r.saveFor(layer)
}
+// Requires startWriting.
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -622,13 +1249,14 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
layer.Flags = make(map[string]interface{})
}
layer.Flags[flag] = value
- return r.Save()
+ return r.saveFor(layer)
}
func (r *layerStore) Status() ([][2]string, error) {
return r.driver.Status(), nil
}
+// Requires startWriting.
func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) {
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, ErrDuplicateID
@@ -663,7 +1291,11 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
// TODO: check if necessary fields are filled
r.layers = append(r.layers, layer)
- r.idindex.Add(id)
+ // This can only fail on duplicate IDs, which shouldn’t happen — and in
+ // that case the index is already in the desired state anyway.
+ // Implementing recovery from an unlikely and unimportant failure here
+ // would be too risky.
+ _ = r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names { // names got from the additional layer store won't be used
r.byname[name] = layer
@@ -672,24 +1304,32 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
if layer.UncompressedDigest != "" {
- r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
+ r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+ }
+ if layer.TOCDigest != "" {
+ r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
- if err := r.Save(); err != nil {
- r.driver.Remove(id)
+ if err := r.saveFor(layer); err != nil {
+ if e := r.Delete(layer.ID); e != nil {
+ logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
+ }
return nil, err
}
return copyLayer(layer), nil
}
-func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
- if !r.IsReadWrite() {
- return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
+// Requires startWriting.
+func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
+ if moreOptions == nil {
+ moreOptions = &LayerOptions{}
}
- size = -1
- if err := os.MkdirAll(r.rundir, 0700); err != nil {
+ if !r.lockfile.IsReadWrite() {
+ return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+ }
+ if err := os.MkdirAll(r.rundir, 0o700); err != nil {
return nil, -1, err
}
- if err := os.MkdirAll(r.layerdir, 0700); err != nil {
+ if err := os.MkdirAll(r.layerdir, 0o700); err != nil {
return nil, -1, err
}
if id == "" {
@@ -703,7 +1343,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, -1, ErrDuplicateID
}
- names = dedupeNames(names)
+ names = dedupeStrings(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, -1, ErrDuplicateName
@@ -713,169 +1353,231 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if parentLayer != nil {
parent = parentLayer.ID
}
- var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
+ var (
+ templateIDMappings *idtools.IDMappings
+ templateMetadata string
+ templateCompressedDigest digest.Digest
+ templateCompressedSize int64
+ templateUncompressedDigest digest.Digest
+ templateTOCDigest digest.Digest
+ templateUncompressedSize int64
+ templateCompressionType archive.Compression
+ templateUIDs, templateGIDs []uint32
+ templateTSdata []byte
+ )
if moreOptions.TemplateLayer != "" {
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
}
+ templateMetadata = templateLayer.Metadata
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
+ templateTOCDigest = templateLayer.TOCDigest
+ templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
+ templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
+ templateCompressionType = templateLayer.CompressionType
+ templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs)
+ templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, -1, err
+ }
} else {
templateIDMappings = &idtools.IDMappings{}
}
- if parentLayer != nil {
- parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
- } else {
- parentMappings = &idtools.IDMappings{}
- }
if mountLabel != "" {
- label.ReserveLabel(mountLabel)
+ selinux.ReserveLabel(mountLabel)
+ }
+
+ // Before actually creating the layer, make a persistent record of it
+ // with the incomplete flag set, so that future processes have a chance
+ // to clean up after it.
+ layer = &Layer{
+ ID: id,
+ Parent: parent,
+ Names: names,
+ MountLabel: mountLabel,
+ Metadata: templateMetadata,
+ Created: time.Now().UTC(),
+ CompressedDigest: templateCompressedDigest,
+ CompressedSize: templateCompressedSize,
+ UncompressedDigest: templateUncompressedDigest,
+ TOCDigest: templateTOCDigest,
+ UncompressedSize: templateUncompressedSize,
+ CompressionType: templateCompressionType,
+ UIDs: templateUIDs,
+ GIDs: templateGIDs,
+ Flags: newMapFrom(moreOptions.Flags),
+ UIDMap: copySlicePreferringNil(moreOptions.UIDMap),
+ GIDMap: copySlicePreferringNil(moreOptions.GIDMap),
+ BigDataNames: []string{},
+ volatileStore: moreOptions.Volatile,
+ }
+ layer.Flags[incompleteFlag] = true
+
+ r.layers = append(r.layers, layer)
+ // This can only fail if the ID is already missing, which shouldn’t
+ // happen — and in that case the index is already in the desired state
+ // anyway. This code is reached from various failure-recovery paths, so
+ // it should be robust against partially missing data.
+ _ = r.idindex.Add(id)
+ r.byid[id] = layer
+ for _, name := range names {
+ r.byname[name] = layer
}
+
+ cleanupFailureContext := ""
+ defer func() {
+ if err != nil {
+ // now that the in-memory structures know about the new
+ // record, we can use regular Delete() to clean up if
+ // anything breaks from here on out
+ if cleanupFailureContext == "" {
+ cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
+ }
+ if e := r.Delete(id); e != nil {
+ logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
+ }
+ }
+ }()
+
+ if err = r.saveFor(layer); err != nil {
+ cleanupFailureContext = "saving incomplete layer metadata"
+ return nil, -1, err
+ }
+
+ for _, item := range moreOptions.BigData {
+ if err = r.setBigData(layer, item.Key, item.Data); err != nil {
+ cleanupFailureContext = fmt.Sprintf("saving big data item %q", item.Key)
+ return nil, -1, err
+ }
+ }
+
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
StorageOpt: options,
IDMappings: idMappings,
}
+
+ var parentMappings, oldMappings *idtools.IDMappings
+ if parentLayer != nil {
+ parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
+ } else {
+ parentMappings = &idtools.IDMappings{}
+ }
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
- }
- return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
+ cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
+ return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
- }
- return nil, -1, errors.Wrapf(err, "error creating read-write layer")
+ cleanupFailureContext = "creating a read-write layer"
+ return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
}
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
- }
- return nil, -1, errors.Wrapf(err, "error creating layer")
+ cleanupFailureContext = "creating a read-only layer"
+ return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
}
}
- oldMappings = parentMappings
- }
- if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
- if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
+ if parentLayer != nil {
+ oldMappings = parentMappings
+ }
+ }
+
+ if oldMappings != nil &&
+ (!reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs())) {
+ if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
+ cleanupFailureContext = "in UpdateLayerIDMap"
+ return nil, -1, err
+ }
+ }
+
+ if len(templateTSdata) > 0 {
+ if err = os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
+ cleanupFailureContext = "creating tar-split parent directory for a copy from template"
+ return nil, -1, err
+ }
+ if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
+ cleanupFailureContext = "creating a tar-split copy from template"
+ return nil, -1, err
+ }
+ }
+
+ size = -1
+ if diff != nil {
+ if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
+ cleanupFailureContext = "applying layer diff"
+ return nil, -1, err
+ }
+ } else if slo != nil {
+ if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
+ cleanupFailureContext = "applying staged directory diff"
return nil, -1, err
}
- }
- if err == nil {
- layer = &Layer{
- ID: id,
- Parent: parent,
- Names: names,
- MountLabel: mountLabel,
- Created: time.Now().UTC(),
- Flags: make(map[string]interface{}),
- UIDMap: copyIDMap(moreOptions.UIDMap),
- GIDMap: copyIDMap(moreOptions.GIDMap),
- BigDataNames: []string{},
- }
- r.layers = append(r.layers, layer)
- r.idindex.Add(id)
- r.byid[id] = layer
- for _, name := range names {
- r.byname[name] = layer
- }
- for flag, value := range flags {
- layer.Flags[flag] = value
- }
- if diff != nil {
- layer.Flags[incompleteFlag] = true
- err = r.Save()
- if err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
- return nil, -1, err
- }
- size, err = r.ApplyDiff(layer.ID, diff)
- if err != nil {
- if r.Delete(layer.ID) != nil {
- // Either a driver error or an error saving.
- // We now have a layer that's been marked for
- // deletion but which we failed to remove.
- }
- return nil, -1, err
- }
- delete(layer.Flags, incompleteFlag)
+ } else {
+ // applyDiffWithOptions() would have updated r.bycompressedsum
+ // and r.byuncompressedsum for us, but if we used a template
+ // layer we didn't call it, so add the new layer as a candidate
+ // for searches for layers by checksum.
+ if layer.CompressedDigest != "" {
+ r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
- err = r.Save()
- if err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
- return nil, -1, err
+ if layer.UncompressedDigest != "" {
+ r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+ }
+ if layer.TOCDigest != "" {
+ r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
- layer = copyLayer(layer)
}
- return layer, size, err
-}
-func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
- layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
- return layer, err
-}
+ delete(layer.Flags, incompleteFlag)
+ if err = r.saveFor(layer); err != nil {
+ cleanupFailureContext = "saving finished layer metadata"
+ return nil, -1, err
+ }
-func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
- return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
+ layer = copyLayer(layer)
+ return layer, size, err
}
+// Requires startReading or startWriting.
func (r *layerStore) Mounted(id string) (int, error) {
- if !r.IsReadWrite() {
- return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
- }
- r.mountsLockfile.RLock()
- defer r.mountsLockfile.Unlock()
- if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
- if err = r.loadMounts(); err != nil {
- return 0, err
- }
+ if !r.lockfile.IsReadWrite() {
+ return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return 0, ErrLayerUnknown
}
+ // NOTE: The caller of this function is not holding (currently cannot hold) r.mountsLockfile,
+ // so the data is necessarily obsolete by the time this function returns. So, we don’t even
+ // try to reload it in this function, we just rely on r.load() that happened during
+ // r.startReading() or r.startWriting().
return layer.MountCount, nil
}
+// Requires startWriting.
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
-
- // check whether options include ro option
- hasReadOnlyOpt := func(opts []string) bool {
- for _, item := range opts {
- if item == "ro" {
- return true
- }
- }
- return false
- }
+ // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter
+ // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies
+ // - r.layers[].MountCount (directly and via loadMounts / saveMounts)
+ // - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
+ // - r.bymount (via loadMounts / saveMounts)
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
- if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
- return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
+ return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
- if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
- if err = r.loadMounts(); err != nil {
- return "", err
- }
+ if err := r.reloadMountsIfChanged(); err != nil {
+ return "", err
}
- defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
@@ -915,18 +1617,22 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
return mountpoint, err
}
-func (r *layerStore) Unmount(id string, force bool) (bool, error) {
- if !r.IsReadWrite() {
- return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+// Requires startWriting.
+func (r *layerStore) unmount(id string, force bool, conditional bool) (bool, error) {
+ // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter → simpleGetCloser.Close()
+ // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies
+ // - r.layers[].MountCount (directly and via loadMounts / saveMounts)
+ // - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
+ // - r.bymount (via loadMounts / saveMounts)
+
+ if !r.lockfile.IsReadWrite() {
+ return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
- if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
- if err = r.loadMounts(); err != nil {
- return false, err
- }
+ if err := r.reloadMountsIfChanged(); err != nil {
+ return false, err
}
- defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -935,6 +1641,9 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
layer = layerByMount
}
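+ // A conditional unmount reports ErrLayerNotMounted instead of decrementing
+ // the count below zero; Delete uses it to drain the mount count in a loop.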
+ if conditional && layer.MountCount == 0 {
+ return false, ErrLayerNotMounted
+ }
if force {
layer.MountCount = 1
}
@@ -954,17 +1663,16 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
return true, err
}
+// Requires startReading or startWriting.
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
- if !r.IsReadWrite() {
- return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ if !r.lockfile.IsReadWrite() {
+ return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
- if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
- if err = r.loadMounts(); err != nil {
- return nil, nil, err
- }
- }
+ // We are not checking r.mountsLockfile.Modified() and calling r.loadMounts here because the store
+ // is only locked for reading, i.e. we are not allowed to modify layer data.
+ // Holding r.mountsLockfile protects us against concurrent mount/unmount operations.
layer, ok := r.lookup(id)
if !ok {
return nil, nil, ErrLayerUnknown
@@ -977,9 +1685,16 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
// We don't know which directories to examine.
return nil, nil, ErrLayerNotMounted
}
+ // Holding r.mountsLockfile protects us against concurrent mount/unmount operations, but we haven’t
+ // held it continuously since the mount data was loaded; so it’s possible the layer was unmounted
+ // in the meantime, or mounted elsewhere. Treat that as if we ran after the unmount:
+ // a missing mount, not a filesystem error.
+ if _, err := system.Lstat(layer.MountPoint); errors.Is(err, os.ErrNotExist) {
+ return nil, nil, ErrLayerNotMounted
+ }
rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID)
+ return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err)
}
m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
fsuids := make(map[int]struct{})
@@ -987,7 +1702,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
st, err := system.Stat(dir)
if err != nil {
- return nil, nil, errors.Wrap(err, "read directory ownership")
+ return nil, nil, fmt.Errorf("read directory ownership: %w", err)
}
lst, err := system.Lstat(dir)
if err != nil {
@@ -1025,29 +1740,36 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
return uids, gids, nil
}
+// Requires startWriting.
func (r *layerStore) removeName(layer *Layer, name string) {
layer.Names = stringSliceWithoutValue(layer.Names, name)
}
-func (r *layerStore) SetNames(id string, names []string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
+// Requires startWriting.
+func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
- names = dedupeNames(names)
- if layer, ok := r.lookup(id); ok {
- for _, name := range layer.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherLayer, ok := r.byname[name]; ok {
- r.removeName(otherLayer, name)
- }
- r.byname[name] = layer
+ layer, ok := r.lookup(id)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ oldNames := layer.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherLayer, ok := r.byname[name]; ok {
+ r.removeName(otherLayer, name)
}
- layer.Names = names
- return r.Save()
+ r.byname[name] = layer
}
- return ErrLayerUnknown
+ layer.Names = names
+ return r.saveFor(layer)
}
func (r *layerStore) datadir(id string) string {
@@ -1058,29 +1780,35 @@ func (r *layerStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
+// Requires startReading or startWriting.
func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName)
}
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
return os.Open(r.datapath(layer.ID, key))
}
+// Requires startWriting.
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
- if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item")
- }
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
+ return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
+ }
+ return r.setBigData(layer, key, data)
+}
+
+func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error {
+ if key == "" {
+ return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
- err := os.MkdirAll(r.datadir(layer.ID), 0700)
+ err := os.MkdirAll(r.datadir(layer.ID), 0o700)
if err != nil {
return err
}
@@ -1088,42 +1816,37 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
// BigData() relies on this behaviour when opening the file for read
// so that it is either accessing the old data or the new one.
- writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
+ writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600)
if err != nil {
- return errors.Wrapf(err, "error opening bigdata file")
+ return fmt.Errorf("opening bigdata file: %w", err)
}
if _, err := io.Copy(writer, data); err != nil {
writer.Close()
- return errors.Wrapf(err, "error copying bigdata for the layer")
+ return fmt.Errorf("copying bigdata for the layer: %w", err)
}
if err := writer.Close(); err != nil {
- return errors.Wrapf(err, "error closing bigdata file for the layer")
+ return fmt.Errorf("closing bigdata file for the layer: %w", err)
}
- addName := true
- for _, name := range layer.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(layer.BigDataNames, key) {
layer.BigDataNames = append(layer.BigDataNames, key)
- return r.Save()
+ return r.saveFor(layer)
}
return nil
}
+// Requires startReading or startWriting.
func (r *layerStore) BigDataNames(id string) ([]string, error) {
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
+ return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown)
}
- return copyStringSlice(layer.BigDataNames), nil
+ return copySlicePreferringNil(layer.BigDataNames), nil
}
+// Requires startReading or startWriting.
func (r *layerStore) Metadata(id string) (string, error) {
if layer, ok := r.lookup(id); ok {
return layer.Metadata, nil
@@ -1131,13 +1854,14 @@ func (r *layerStore) Metadata(id string) (string, error) {
return "", ErrLayerUnknown
}
+// Requires startWriting.
func (r *layerStore) SetMetadata(id, metadata string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
- return r.Save()
+ return r.saveFor(layer)
}
return ErrLayerUnknown
}
@@ -1146,84 +1870,89 @@ func (r *layerStore) tspath(id string) string {
return filepath.Join(r.layerdir, id+tarSplitSuffix)
}
+// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true.
+// The caller must hold r.inProcessLock for reading.
+func layerHasIncompleteFlag(layer *Layer) bool {
+ if layer.Flags == nil {
+ return false
+ }
+ if flagValue, ok := layer.Flags[incompleteFlag]; ok {
+ if b, ok := flagValue.(bool); ok && b {
+ return true
+ }
+ }
+ return false
+}
+
+// Requires startWriting.
func (r *layerStore) deleteInternal(id string) error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
+ // Ensure that if we are interrupted, the layer will be cleaned up.
+ if !layerHasIncompleteFlag(layer) {
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ layer.Flags[incompleteFlag] = true
+ if err := r.saveFor(layer); err != nil {
+ return err
+ }
+ }
+ // We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
- err := r.driver.Remove(id)
- if err != nil {
+ if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
-
os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id))
delete(r.byid, id)
for _, name := range layer.Names {
delete(r.byname, name)
}
- r.idindex.Delete(id)
+ // This can only fail if the ID is already missing, which shouldn’t
+ // happen — and in that case the index is already in the desired state
+ // anyway. The store’s Delete method is used on various paths to
+ // recover from failures, so this should be robust against partially
+ // missing data.
+ _ = r.idindex.Delete(id)
mountLabel := layer.MountLabel
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
r.deleteInDigestMap(id)
- toDeleteIndex := -1
- for i, candidate := range r.layers {
- if candidate.ID == id {
- toDeleteIndex = i
- break
- }
- }
- if toDeleteIndex != -1 {
- // delete the layer at toDeleteIndex
- if toDeleteIndex == len(r.layers)-1 {
- r.layers = r.layers[:len(r.layers)-1]
- } else {
- r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
- }
- }
- if mountLabel != "" {
- var found bool
- for _, candidate := range r.layers {
- if candidate.MountLabel == mountLabel {
- found = true
- break
- }
- }
- if !found {
- label.ReleaseLabel(mountLabel)
- }
+ r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
+ return candidate.ID == id
+ })
+ if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
+ return candidate.MountLabel == mountLabel
+ }) {
+ selinux.ReleaseLabel(mountLabel)
}
-
return nil
}
+// Requires startWriting.
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
- for i, layerID := range layers {
- if layerID == id {
- layers = append(layers[:i], layers[i+1:]...)
- r.bycompressedsum[digest] = layers
- break
- }
+ if i := slices.Index(layers, id); i != -1 {
+ layers = slices.Delete(layers, i, i+1)
+ r.bycompressedsum[digest] = layers
}
}
for digest, layers := range r.byuncompressedsum {
- for i, layerID := range layers {
- if layerID == id {
- layers = append(layers[:i], layers[i+1:]...)
- r.byuncompressedsum[digest] = layers
- break
- }
+ if i := slices.Index(layers, id); i != -1 {
+ layers = slices.Delete(layers, i, i+1)
+ r.byuncompressedsum[digest] = layers
}
}
}
+// Requires startWriting.
func (r *layerStore) Delete(id string) error {
layer, ok := r.lookup(id)
if !ok {
@@ -1233,37 +1962,28 @@ func (r *layerStore) Delete(id string) error {
// The layer may already have been explicitly unmounted, but if not, we
// should try to clean that up before we start deleting anything at the
// driver level.
- mountCount, err := r.Mounted(id)
- if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
- }
- for mountCount > 0 {
- if _, err := r.Unmount(id, false); err != nil {
- return err
+ for {
+ _, err := r.unmount(id, false, true)
+ if err == ErrLayerNotMounted {
+ break
}
- mountCount, err = r.Mounted(id)
if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ return err
}
}
if err := r.deleteInternal(id); err != nil {
return err
}
- return r.Save()
-}
-
-func (r *layerStore) Lookup(name string) (id string, err error) {
- if layer, ok := r.lookup(name); ok {
- return layer.ID, nil
- }
- return "", ErrLayerUnknown
+ return r.saveFor(layer)
}
+// Requires startReading or startWriting.
func (r *layerStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
+// Requires startReading or startWriting.
func (r *layerStore) Get(id string) (*Layer, error) {
if layer, ok := r.lookup(id); ok {
return copyLayer(layer), nil
@@ -1271,22 +1991,39 @@ func (r *layerStore) Get(id string) (*Layer, error) {
return nil, ErrLayerUnknown
}
+// Requires startWriting.
func (r *layerStore) Wipe() error {
- if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ if !r.lockfile.IsReadWrite() {
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
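+ // Delete the newest layers first, so that children (created after their
+ // parents) are removed before the layers they depend on.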
+ sort.Slice(ids, func(i, j int) bool {
+ return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+ })
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
}
}
+ ids, err := r.driver.ListLayers()
+ if err != nil {
+ if !errors.Is(err, drivers.ErrNotSupported) {
+ return err
+ }
+ ids = nil
+ }
+ for _, id := range ids {
+ if err := r.driver.Remove(id); err != nil {
+ return err
+ }
+ }
return nil
}
+// Requires startReading or startWriting.
func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) {
var ok bool
toLayer, ok = r.lookup(to)
@@ -1311,6 +2048,7 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st
return from, to, fromLayer, toLayer, nil
}
+// The caller must hold r.inProcessLock for reading.
func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
if layer == nil {
return &idtools.IDMappings{}
@@ -1318,6 +2056,10 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
}
+// Requires startReading or startWriting.
+//
+// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store,
+// see drivers/overlay.Driver.getMergedDir.
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
@@ -1336,16 +2078,25 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
return os.Open(filepath.Join(s.path, path))
}
+// LOCKING BUG: See the comments in layerStore.Diff
func (s *simpleGetCloser) Close() error {
- _, err := s.r.Unmount(s.id, false)
+ _, err := s.r.unmount(s.id, false, false)
return err
}
+// LOCKING BUG: See the comments in layerStore.Diff
func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
- return getter.DiffGetter(id)
+ fgc, err := getter.DiffGetter(id)
+ if err != nil {
+ return nil, err
+ }
+ if fgc != nil {
+ return fgc, nil
+ }
}
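+ // A nil FileGetCloser with a nil error means the driver declined to
+ // provide one; fall back to a read-only mount of the layer.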
- path, err := r.Mount(id, drivers.MountOpts{})
+
+ path, err := r.Mount(id, drivers.MountOpts{Options: []string{"ro"}})
if err != nil {
return nil, err
}
@@ -1356,6 +2107,28 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
}, nil
}
+// writeCompressedData copies data from source to compressor (which the caller layers on top of a pipe writer), closing both when done.
+func writeCompressedData(compressor io.WriteCloser, source io.ReadCloser) error {
+ defer compressor.Close()
+ defer source.Close()
+ _, err := io.Copy(compressor, source)
+ return err
+}
+
+// writeCompressedDataGoroutine copies data from source to compressor, which is on top of pwriter.
+// All errors must be reported by updating pwriter.
+func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteCloser, source io.ReadCloser) {
+ err := errors.New("internal error: unexpected panic in writeCompressedDataGoroutine")
+ defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pwriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+ err = writeCompressedData(compressor, source)
+}
+
+// Requires startReading or startWriting.
+//
+// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store,
+// see drivers/overlay.Driver.getMergedDir.
func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
var metadata storage.Unpacker
@@ -1387,12 +2160,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
preader.Close()
return nil, err
}
- go func() {
- defer pwriter.Close()
- defer compressor.Close()
- defer rc.Close()
- io.Copy(compressor, rc)
- }()
+ go writeCompressedDataGoroutine(pwriter, compressor, rc)
return preader, nil
}
@@ -1406,7 +2174,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
- // This is an additional layer. We leverage blob API for aquiring the reproduced raw blob.
+ // This is an additional layer. We leverage blob API for acquiring the reproduced raw blob.
info, err := aLayer.Info()
if err != nil {
aLayer.Release()
@@ -1428,7 +2196,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
diff, err := archive.DecompressStream(blob)
if err != nil {
if err2 := blob.Close(); err2 != nil {
- err = errors.Wrapf(err, "failed to close blob file: %v", err2)
+ err = fmt.Errorf("failed to close blob file: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1436,7 +2204,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
rc, err := maybeCompressReadCloser(diff)
if err != nil {
if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
- err = errors.Wrapf(err, "failed to cleanup: %v", err2)
+ err = fmt.Errorf("failed to cleanup: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1461,38 +2229,55 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
return maybeCompressReadCloser(diff)
}
- defer tsfile.Close()
decompressor, err := pgzip.NewReader(tsfile)
if err != nil {
- return nil, err
- }
- defer decompressor.Close()
-
- tsbytes, err := ioutil.ReadAll(decompressor)
- if err != nil {
+ if e := tsfile.Close(); e != nil {
+ logrus.Debug(e)
+ }
return nil, err
}
- metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))
+ metadata = storage.NewJSONUnpacker(decompressor)
+ // LOCKING BUG: With btrfs and zfs graph drivers, this uses r.Mount() and r.unmount() holding layerStore only locked for reading
+ // but they modify in-memory state.
fgetter, err := r.newFileGetter(to)
if err != nil {
- return nil, err
+ errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
+ if err := decompressor.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+ }
+ if err := tsfile.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+ }
+ return nil, errs.ErrorOrNil()
}
tarstream := asm.NewOutputTarStream(fgetter, metadata)
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
- err1 := tarstream.Close()
- err2 := fgetter.Close()
- if err2 == nil {
- return err1
+ var errs *multierror.Error
+ if err := decompressor.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+ }
+ if err := tsfile.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+ }
+ if err := tarstream.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
}
- return err2
+ if err := fgetter.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
+ }
+ if errs != nil {
+ return errs.ErrorOrNil()
+ }
+ return nil
})
return maybeCompressReadCloser(rc)
}
+// Requires startReading or startWriting.
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
var fromLayer, toLayer *Layer
from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to)
@@ -1502,9 +2287,34 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
}
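+// updateDigestMap moves id out of the list keyed by oldvalue and into the
+// one keyed by newvalue, deleting any list that becomes empty.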
+func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
+ var newList []string
+ if oldvalue != "" {
+ for _, value := range (*m)[oldvalue] {
+ if value != id {
+ newList = append(newList, value)
+ }
+ }
+ if len(newList) > 0 {
+ (*m)[oldvalue] = newList
+ } else {
+ delete(*m, oldvalue)
+ }
+ }
+ if newvalue != "" {
+ (*m)[newvalue] = append((*m)[newvalue], id)
+ }
+}
+
+// Requires startWriting.
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
- if !r.IsReadWrite() {
- return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
+ return r.applyDiffWithOptions(to, nil, diff)
+}
+
+// Requires startWriting.
+func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
+ if !r.lockfile.IsReadWrite() {
+ return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
@@ -1517,83 +2327,113 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil && err != io.EOF {
return -1, err
}
-
compression := archive.DetectCompression(header[:n])
- compressedDigest := digest.Canonical.Digester()
- compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
- defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
-
- tsdata := bytes.Buffer{}
- compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
- if err != nil {
- compressor = pgzip.NewWriter(&tsdata)
+ defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
+
+ // Decide if we need to compute digests
+ var compressedDigest, uncompressedDigest digest.Digest // = ""
+ var compressedDigester, uncompressedDigester digest.Digester // = nil
+ if layerOptions != nil && layerOptions.OriginalDigest != "" &&
+ layerOptions.OriginalDigest.Algorithm() == digest.Canonical {
+ compressedDigest = layerOptions.OriginalDigest
+ } else {
+ compressedDigester = digest.Canonical.Digester()
}
- metadata := storage.NewJSONPacker(compressor)
- uncompressed, err := archive.DecompressStream(defragmented)
- if err != nil {
- return -1, err
+ if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
+ layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
+ uncompressedDigest = layerOptions.UncompressedDigest
+ } else if compression != archive.Uncompressed {
+ uncompressedDigester = digest.Canonical.Digester()
+ }
+
+ var compressedWriter io.Writer
+ if compressedDigester != nil {
+ compressedWriter = compressedDigester.Hash()
+ } else {
+ compressedWriter = io.Discard
}
- defer uncompressed.Close()
- uncompressedDigest := digest.Canonical.Digester()
- uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
+ compressedCounter := ioutils.NewWriteCounter(compressedWriter)
+ defragmented = io.TeeReader(defragmented, compressedCounter)
+
+ tsdata := bytes.Buffer{}
uidLog := make(map[uint32]struct{})
gidLog := make(map[uint32]struct{})
- idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
- if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
- uidLog[uint32(h.Uid)] = struct{}{}
- gidLog[uint32(h.Gid)] = struct{}{}
+ var uncompressedCounter *ioutils.WriteCounter
+
+ size, err = func() (int64, error) { // A scope for defer
+ compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
+ if err != nil {
+ return -1, err
}
- })
+ defer compressor.Close() // This must happen before tsdata is consumed.
+ if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+ logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
+ }
+ metadata := storage.NewJSONPacker(compressor)
+ uncompressed, err := archive.DecompressStream(defragmented)
+ if err != nil {
+ return -1, err
+ }
+ defer uncompressed.Close()
+ idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
+ if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
+ uidLog[uint32(h.Uid)] = struct{}{}
+ gidLog[uint32(h.Gid)] = struct{}{}
+ }
+ })
+ if err != nil {
+ return -1, err
+ }
+ defer idLogger.Close() // This must happen before uidLog and gidLog are consumed.
+ uncompressedCounter = ioutils.NewWriteCounter(idLogger)
+ uncompressedWriter := (io.Writer)(uncompressedCounter)
+ if uncompressedDigester != nil {
+ uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash())
+ }
+ payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter())
+ if err != nil {
+ return -1, err
+ }
+ options := drivers.ApplyDiffOpts{
+ Diff: payload,
+ Mappings: r.layerMappings(layer),
+ MountLabel: layer.MountLabel,
+ }
+ size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options)
+ if err != nil {
+ return -1, err
+ }
+ return size, err
+ }()
if err != nil {
return -1, err
}
- defer idLogger.Close()
- payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter())
- if err != nil {
+
+ if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return -1, err
}
- options := drivers.ApplyDiffOpts{
- Diff: payload,
- Mappings: r.layerMappings(layer),
- MountLabel: layer.MountLabel,
- }
- size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options)
- if err != nil {
+ if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return -1, err
}
- compressor.Close()
- if err == nil {
- if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
- return -1, err
- }
- if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
- return -1, err
- }
+ if compressedDigester != nil {
+ compressedDigest = compressedDigester.Digest()
+ }
+ if uncompressedDigester != nil {
+ uncompressedDigest = uncompressedDigester.Digest()
+ }
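+ // For input that was never compressed, the "compressed" and uncompressed
+ // streams are the same bytes, so their digests must match.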
+ if uncompressedDigest == "" && compression == archive.Uncompressed {
+ uncompressedDigest = compressedDigest
}
- updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
- var newList []string
- if oldvalue != "" {
- for _, value := range (*m)[oldvalue] {
- if value != id {
- newList = append(newList, value)
- }
- }
- if len(newList) > 0 {
- (*m)[oldvalue] = newList
- } else {
- delete(*m, oldvalue)
- }
- }
- if newvalue != "" {
- (*m)[newvalue] = append((*m)[newvalue], id)
- }
+ updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
+ layer.CompressedDigest = compressedDigest
+ if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
+ layer.CompressedSize = *layerOptions.OriginalSize
+ } else {
+ layer.CompressedSize = compressedCounter.Count
}
- updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
- layer.CompressedDigest = compressedDigest.Digest()
- layer.CompressedSize = compressedCounter.Count
- updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
- layer.UncompressedDigest = uncompressedDigest.Digest()
+ updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
+ layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
layer.CompressionType = compression
layer.UIDs = make([]uint32, 0, len(uidLog))
@@ -1611,11 +2451,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return layer.GIDs[i] < layer.GIDs[j]
})
- err = r.Save()
+ err = r.saveFor(layer)
return size, err
}
+// Requires (startReading or?) startWriting.
func (r *layerStore) DifferTarget(id string) (string, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
@@ -1628,7 +2469,8 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
return ddriver.DifferTarget(layer.ID)
}
-func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
+// Requires startWriting.
+func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@@ -1638,60 +2480,79 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return ErrLayerUnknown
}
if options == nil {
- options = &drivers.ApplyDiffOpts{
- Mappings: r.layerMappings(layer),
- MountLabel: layer.MountLabel,
+ options = &drivers.ApplyDiffWithDifferOpts{
+ ApplyDiffOpts: drivers.ApplyDiffOpts{
+ Mappings: r.layerMappings(layer),
+ MountLabel: layer.MountLabel,
+ },
+ Flags: nil,
}
}
- err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
+
+ err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options)
if err != nil {
return err
}
layer.UIDs = diffOutput.UIDs
layer.GIDs = diffOutput.GIDs
+ updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
layer.UncompressedDigest = diffOutput.UncompressedDigest
+ updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, diffOutput.CompressedDigest, layer.ID)
+ layer.CompressedDigest = diffOutput.CompressedDigest
+ updateDigestMap(&r.bytocsum, layer.TOCDigest, diffOutput.TOCDigest, layer.ID)
+ layer.TOCDigest = diffOutput.TOCDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
- if err = r.Save(); err != nil {
+ if options != nil && options.Flags != nil {
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ maps.Copy(layer.Flags, options.Flags)
+ }
+ if err = r.saveFor(layer); err != nil {
return err
}
+
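+ // Persist the differ's tar-split data, gzip-compressed, so Diff() can
+ // later reconstruct the layer's original tar stream.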
+ if diffOutput.TarSplit != nil {
+ tsdata := bytes.Buffer{}
+ compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
+ if err != nil {
+ compressor = pgzip.NewWriter(&tsdata)
+ }
+ if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+ logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
+ }
+ if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
+ compressor.Close()
+ return err
+ }
+ compressor.Close()
+ if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
+ return err
+ }
+ if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
+ return err
+ }
+ }
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
- r.Delete(id)
+ if err2 := r.Delete(id); err2 != nil {
+ logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2)
+ }
return err
}
}
return err
}
-func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+// applyDiffWithDifferNoLock must be called without any c/storage locks held, to allow the differ to make c/storage calls.
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return nil, ErrNotSupported
}
- if to == "" {
- output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
- return &output, err
- }
-
- layer, ok := r.lookup(to)
- if !ok {
- return nil, ErrLayerUnknown
- }
- if options == nil {
- options = &drivers.ApplyDiffOpts{
- Mappings: r.layerMappings(layer),
- MountLabel: layer.MountLabel,
- }
- }
- output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
- if err != nil {
- return nil, err
- }
- layer.UIDs = output.UIDs
- layer.GIDs = output.GIDs
- err = r.Save()
+ output, err := ddriver.ApplyDiffWithDiffer(options, differ)
return &output, err
}
@@ -1703,6 +2564,7 @@ func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error {
return ddriver.CleanupStagingDirectory(stagingDirectory)
}
+// Requires startReading or startWriting.
func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
var layers []Layer
for _, layerID := range m[d] {
@@ -1715,82 +2577,29 @@ func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Di
return layers, nil
}
+// Requires startReading or startWriting.
func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.bycompressedsum, d)
}
+// Requires startReading or startWriting.
func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.byuncompressedsum, d)
}
-func (r *layerStore) Lock() {
- r.lockfile.Lock()
-}
-
-func (r *layerStore) RecursiveLock() {
- r.lockfile.RecursiveLock()
-}
-
-func (r *layerStore) RLock() {
- r.lockfile.RLock()
-}
-
-func (r *layerStore) Unlock() {
- r.lockfile.Unlock()
-}
-
-func (r *layerStore) Touch() error {
- return r.lockfile.Touch()
-}
-
-func (r *layerStore) Modified() (bool, error) {
- var mmodified bool
- lmodified, err := r.lockfile.Modified()
- if err != nil {
- return lmodified, err
- }
- if r.IsReadWrite() {
- r.mountsLockfile.RLock()
- defer r.mountsLockfile.Unlock()
- mmodified, err = r.mountsLockfile.Modified()
- if err != nil {
- return lmodified, err
- }
- }
- return lmodified || mmodified, nil
-}
-
-func (r *layerStore) IsReadWrite() bool {
- return r.lockfile.IsReadWrite()
-}
-
-func (r *layerStore) TouchedSince(when time.Time) bool {
- return r.lockfile.TouchedSince(when)
-}
-
-func (r *layerStore) Locked() bool {
- return r.lockfile.Locked()
-}
-
-func (r *layerStore) ReloadIfChanged() error {
- r.loadMut.Lock()
- defer r.loadMut.Unlock()
-
- modified, err := r.Modified()
- if err == nil && modified {
- return r.Load()
- }
- return err
+// Requires startReading or startWriting.
+func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
+ return r.layersByDigestMap(r.bytocsum, d)
}
func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {
if rErr == nil {
- rErr = errors.Wrapf(err, "close error")
+ rErr = fmt.Errorf("close error: %w", err)
continue
}
- rErr = errors.Wrapf(rErr, "%v", err)
+ rErr = fmt.Errorf("%v: %w", err, rErr)
}
}
return
diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go
index 6fac2ebac..ec98b40ce 100644
--- a/vendor/github.com/containers/storage/lockfile_compat.go
+++ b/vendor/github.com/containers/storage/lockfile_compat.go
@@ -4,12 +4,15 @@ import (
"github.com/containers/storage/pkg/lockfile"
)
-type Locker = lockfile.Locker
+// Deprecated: Use lockfile.*LockFile.
+type Locker = lockfile.Locker //nolint:staticcheck // SA1019 lockfile.Locker is deprecated
+// Deprecated: Use lockfile.GetLockFile.
func GetLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetLockfile(path)
}
+// Deprecated: Use lockfile.GetROLockFile.
func GetROLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetROLockfile(path)
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index aa6689593..13a458956 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -5,12 +5,14 @@ import (
"bufio"
"bytes"
"compress/bzip2"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"sync"
"syscall"
@@ -22,8 +24,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
gzip "github.com/klauspost/pgzip"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -70,13 +70,20 @@ type (
}
)
+const PaxSchilyXattr = "SCHILY.xattr."
+
const (
- tarExt = "tar"
- solaris = "solaris"
- windows = "windows"
- containersOverrideXattr = "user.containers.override_stat"
+ tarExt = "tar"
+ solaris = "solaris"
+ windows = "windows"
+ darwin = "darwin"
+ freebsd = "freebsd"
)
+var xattrsToIgnore = map[string]interface{}{
+ "security.selinux": true,
+}
+
// Archiver allows the reuse of most utility functions of this package with a
// pluggable Untar function. To facilitate the passing of specific id mappings
// for untar, an archiver can be created with maps which will then be passed to
@@ -126,16 +133,6 @@ const (
OverlayWhiteoutFormat
)
-const (
- modeISDIR = 040000 // Directory
- modeISFIFO = 010000 // FIFO
- modeISREG = 0100000 // Regular file
- modeISLNK = 0120000 // Symbolic link
- modeISBLK = 060000 // Block special file
- modeISCHR = 020000 // Character special file
- modeISSOCK = 0140000 // Socket
-)
-
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
@@ -174,10 +171,17 @@ func DetectCompression(source []byte) Compression {
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
+
+ defer func() {
+ if Err != nil {
+ p.Put(buf)
+ }
+ }()
+
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
@@ -194,6 +198,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, buf)
return readBufWrapper, nil
case Gzip:
+ cleanup := func() {
+ p.Put(buf)
+ }
+ if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse {
+ return rc, nil
+ }
gzReader, err := gzip.NewReader(buf)
if err != nil {
return nil, err
@@ -212,16 +222,29 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return readBufWrapper, nil
case Zstd:
+ cleanup := func() {
+ p.Put(buf)
+ }
+ if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse {
+ return rc, nil
+ }
return zstdReader(buf)
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
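// Both DecompressStream and CompressStream now use a named error return plus
// a deferred check so the pooled buffer is recycled on every error path,
// including the new tryProcFilter branches. The idiom in miniature, with a
// plain sync.Pool standing in for the pools package (illustrative only):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() any { b := make([]byte, 32*1024); return &b }}

func acquire(fail bool) (_ *[]byte, Err error) {
	buf := bufPool.Get().(*[]byte)
	defer func() {
		if Err != nil {
			bufPool.Put(buf) // return the buffer to the pool on any error
		}
	}()
	if fail {
		return nil, errors.New("peek failed")
	}
	return buf, nil // success: the caller now owns buf
}

func main() {
	if _, err := acquire(true); err != nil {
		fmt.Println(err) // the buffer went back to the pool before this
	}
}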
// CompressStream compresses the dest with specified compression algorithm.
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
+
+ defer func() {
+ if Err != nil {
+ p.Put(buf)
+ }
+ }()
+
switch compression {
case Uncompressed:
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
@@ -235,9 +258,9 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
@@ -323,7 +346,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
}
pipeWriter.Close()
-
}()
return pipeReader
}
@@ -345,19 +367,50 @@ func (compression *Compression) Extension() string {
return ""
}
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+type nosysFileInfo struct {
+ os.FileInfo
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+ // A Sys value of type *tar.Header is safe as it is system-independent.
+ // The tar.FileInfoHeader function copies the fields into the returned
+ // header without performing any OS lookups.
+ if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+ return sys
+ }
+ return nil
+}
+
+// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi.
+var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error
+
+func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
+ if sysStatOverride == nil {
+ return tar.FileInfoHeader(fi, link)
+ }
+ hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
+ if err != nil {
+ return nil, err
+ }
+ return hdr, sysStatOverride(fi, hdr)
+}
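// tar.FileInfoHeader consults fi.Sys(), and on Unix that can resolve numeric
// IDs to user/group names, potentially calling into glibc. Wrapping the
// FileInfo so Sys() returns nil (or a *tar.Header) suppresses the lookup, and
// statUnix (added in archive_unix.go below) then fills Uid/Gid and device
// numbers directly. A small, illustrative demonstration:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

type nosysFileInfo struct{ os.FileInfo }

// Sys hides the platform stat so tar.FileInfoHeader cannot introspect it.
func (fi nosysFileInfo) Sys() interface{} { return nil }

func main() {
	fi, err := os.Lstat(os.Args[0])
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, "")
	if err != nil {
		panic(err)
	}
	// Uname/Gname stay empty; no name lookup happened.
	fmt.Printf("name=%s uname=%q gname=%q\n", hdr.Name, hdr.Uname, hdr.Gname)
}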
+
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := tar.FileInfoHeader(fi, link)
+ hdr, err := fileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}
- hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
- return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
@@ -366,57 +419,32 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
return hdr, nil
}
-// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
-// https://github.com/golang/go/commit/66b5a2f
-func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
- fm := fi.Mode()
- switch {
- case fm.IsRegular():
- mode |= modeISREG
- case fi.IsDir():
- mode |= modeISDIR
- case fm&os.ModeSymlink != 0:
- mode |= modeISLNK
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- mode |= modeISCHR
- } else {
- mode |= modeISBLK
- }
- case fm&os.ModeNamedPipe != 0:
- mode |= modeISFIFO
- case fm&os.ModeSocket != 0:
- mode |= modeISSOCK
- }
- return mode
-}
-
-// ReadSecurityXattrToTarHeader reads security.capability, security,image
+// readSecurityXattrToTarHeader reads the security.capability and security.ima
// xattrs from filesystem to a tar header
-func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
+func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ if hdr.PAXRecords == nil {
+ hdr.PAXRecords = make(map[string]string)
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
+ return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
- hdr.Xattrs[xattr] = string(capability)
+ hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
}
}
return nil
}
-// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
-func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
+// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
+func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return err
}
for _, key := range xattrs {
- if strings.HasPrefix(key, "user.") {
+ if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
value, err := system.Lgetxattr(path, key)
if err != nil {
if errors.Is(err, system.E2BIG) {
@@ -425,10 +453,10 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
}
return err
}
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
+ if hdr.PAXRecords == nil {
+ hdr.PAXRecords = make(map[string]string)
}
- hdr.Xattrs[key] = string(value)
+ hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
}
}
return nil
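// Moving from the deprecated hdr.Xattrs to hdr.PAXRecords means each xattr
// key is stored under the "SCHILY.xattr." PAX namespace. A tiny sketch of the
// mapping in both directions (the key below is invented for illustration):

package main

import (
	"archive/tar"
	"fmt"
	"strings"
)

const paxSchilyXattr = "SCHILY.xattr." // same value as PaxSchilyXattr above

func main() {
	hdr := &tar.Header{Name: "f", PAXRecords: map[string]string{}}

	// Writing, as readUserXattrToTarHeader does:
	hdr.PAXRecords[paxSchilyXattr+"user.comment"] = "hello"

	// Reading, as createTarFile does: strip the prefix and skip PAX records
	// that are not xattrs at all.
	for key, value := range hdr.PAXRecords {
		if xattrKey, ok := strings.CutPrefix(key, paxSchilyXattr); ok {
			fmt.Printf("would set xattr %q = %q\n", xattrKey, value)
		}
	}
}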
@@ -478,7 +506,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
}
// canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
@@ -507,15 +535,22 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return err
}
}
+ if fi.Mode()&os.ModeSocket != 0 {
+ logrus.Infof("archive: skipping %q since it is a socket", path)
+ return nil
+ }
hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
return err
}
- if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
- if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
+ if err := readUserXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+ if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
return err
}
if ta.CopyPass {
@@ -540,9 +575,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
- //handle re-mapping container ID mappings back to host ID mappings before
- //writing tar headers/files. We skip whiteout files because they were written
- //by the kernel and already have proper ownership relative to the host
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
fileIDPair, err := getFileUIDGID(fi.Sys())
if err != nil {
@@ -558,6 +593,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if ta.ChownOpts != nil {
hdr.Uid = ta.ChownOpts.UID
hdr.Gid = ta.ChownOpts.GID
+ // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway,
+ // and they unnecessarily give recipients of the tar file potentially private data.
+ hdr.Uname = ""
+ hdr.Gname = ""
}
maybeTruncateHeaderModTime(hdr)
@@ -631,7 +670,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
- case tar.TypeReg, tar.TypeRegA:
+ case tar.TypeReg:
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
@@ -645,10 +684,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
file.Close()
- case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
+ logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
return nil
}
+ fallthrough
+ case tar.TypeFifo:
// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
@@ -660,7 +702,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
}
- if err := os.Link(targetPath, path); err != nil {
+ if err := handleLLink(targetPath, path); err != nil {
return err
}
@@ -686,9 +728,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
- if forceMask != nil && hdr.Typeflag != tar.TypeSymlink {
- value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
- if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil {
+ if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
+ value := idtools.Stat{
+ IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
+ Mode: hdrInfo.Mode() & 0o7777,
+ }
+ if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
return err
}
}
@@ -739,8 +784,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
var errs []string
- for key, value := range hdr.Xattrs {
- if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ for key, value := range hdr.PAXRecords {
+ xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr)
+ if !ok {
+ continue
+ }
+ if _, found := xattrsToIgnore[xattrKey]; found {
+ continue
+ }
+ if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
@@ -755,6 +807,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
+ // We defer setting flags on directories until the end of
+ // Unpack or UnpackLayer in case setting them makes the
+ // directory immutable.
+ if hdr.Typeflag != tar.TypeDir {
+ if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+ return err
+ }
+ }
+
if len(errs) > 0 {
logrus.WithFields(logrus.Fields{
"errors": errs,
@@ -773,7 +834,6 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
-
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
@@ -849,17 +909,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
- filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
- if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
- return nil
+ return nil //nolint: nilerr
}
if options.IncludeSourceDir && include == "." && relFilePath != "." {
@@ -876,8 +936,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
- logrus.Errorf("Error matching %s: %v", relFilePath, err)
- return err
+ return fmt.Errorf("matching %s: %w", relFilePath, err)
}
skip = matches
}
@@ -889,7 +948,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// dir. If so then we can't skip this dir.
// It's not a dir, so we can just return/skip.
- if !f.IsDir() {
+ if !d.IsDir() {
return nil
}
@@ -940,7 +999,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
}
return nil
- })
+ }); err != nil {
+ logrus.Errorf("%s", err)
+ return
+ }
}
}()
@@ -959,15 +1021,12 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
+ doChown := !options.NoLchown
if options.ForceMask != nil {
- uid, gid, mode, err := GetFileOwner(dest)
- if err == nil {
- value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
- if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
- return err
- }
- }
+ // if ForceMask is in place, make sure lchown is disabled.
+ doChown = false
}
+ var rootHdr *tar.Header
// Iterate through the files in the archive.
loop:
@@ -999,8 +1058,8 @@ loop:
// Not the root directory, ensure that the parent directory exists
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
if err != nil {
return err
}
@@ -1012,6 +1071,9 @@ loop:
if err != nil {
return err
}
+ if rel == "." {
+ rootHdr = hdr
+ }
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
@@ -1064,7 +1126,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+ if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
@@ -1081,14 +1143,32 @@ loop:
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
+ if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+ return err
+ }
}
+
+ if options.ForceMask != nil {
+ value := idtools.Stat{Mode: 0o755}
+ if rootHdr != nil {
+ value.IDs.UID = rootHdr.Uid
+ value.IDs.GID = rootHdr.Gid
+ value.Mode = os.FileMode(rootHdr.Mode)
+ }
+ if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
+ return err
+ }
+ }
+
return nil
}
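// Under ForceMask, the real ownership and mode are recorded in the
// "user.containers.override_stat" xattr rather than on the inode; the deleted
// lines above show the encoded form, "uid:gid:0mode", which the darwin branch
// of remapIDs later splits on ":". A sketch of that encoding, assuming the
// format is unchanged (the xattr write itself is elided):

package main

import (
	"fmt"
	"os"
)

func encodeOverrideStat(uid, gid int, mode os.FileMode) string {
	// Mode is printed in octal with a leading 0, e.g. 1000:1000:0755.
	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
}

func main() {
	fmt.Println(encodeOverrideStat(1000, 1000, 0o755)) // 1000:1000:0755
}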
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
+//
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
@@ -1104,7 +1184,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
@@ -1140,7 +1220,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1155,7 +1235,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1175,7 +1255,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1202,7 +1282,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
- if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@@ -1220,7 +1300,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
if srcSt.IsDir() {
- return fmt.Errorf("Can't copy a directory")
+ return fmt.Errorf("can't copy a directory")
}
// Clean up the trailing slash. This must be done in an operating
@@ -1229,7 +1309,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
- if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
return err
}
@@ -1275,7 +1355,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: rsystem.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
NoOverwriteDirNonDir: true,
}
err = archiver.Untar(r, filepath.Dir(dst), options)
@@ -1295,6 +1375,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
if err != nil {
return err
}
+ } else if runtime.GOOS == darwin {
+ uid, gid = hdr.Uid, hdr.Gid
+ if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[0], 10, 32)
+ if err == nil {
+ uid = int(val)
+ }
+ val, err = strconv.ParseUint(attrs[1], 10, 32)
+ if err == nil {
+ gid = int(val)
+ }
+ }
+ }
} else {
uid, gid = hdr.Uid, hdr.Gid
}
@@ -1314,7 +1409,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
- f, err := ioutil.TempFile(dir, "")
+ f, err := os.CreateTemp(dir, "")
if err != nil {
return nil, err
}
@@ -1370,7 +1465,7 @@ func IsArchive(header []byte) bool {
if compression != Uncompressed {
return true
}
- r := tar.NewReader(bytes.NewBuffer(header))
+ r := tar.NewReader(bytes.NewReader(header))
_, err := r.Next()
return err == nil
}
@@ -1416,7 +1511,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -1435,11 +1530,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ if err == nil && hashError != nil {
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
index 7bc44a566..db614cdd6 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
@@ -1,4 +1,4 @@
-// +build go1.10
+//go:build go1.10
package archive
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
index d19811fdb..304464fe7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
@@ -1,4 +1,4 @@
-// +build !go1.10
+//go:build !go1.10
package archive
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
new file mode 100644
index 000000000..74e62331a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
@@ -0,0 +1,18 @@
+//go:build netbsd || freebsd || darwin
+
+package archive
+
+import (
+ "archive/tar"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+ permissionsMask := hdrInfo.Mode()
+ if forceMask != nil {
+ permissionsMask = *forceMask
+ }
+ return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW)
+}
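// On FreeBSD, NetBSD, and darwin, fchmodat(2) honors AT_SYMLINK_NOFOLLOW, so
// handleLChmod above can chmod a symlink itself and needs none of the
// symlink special-casing the Linux version keeps (Linux rejects that flag
// combination for fchmodat). Hypothetical call, for illustration only:
//
//	err := unix.Fchmodat(unix.AT_FDCWD, "/some/symlink", 0o600, unix.AT_SYMLINK_NOFOLLOW)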
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
deleted file mode 100644
index 1953b4051..000000000
--- a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// +build freebsd
-
-package archive
-
-import (
- "archive/tar"
- "errors"
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
- "golang.org/x/sys/unix"
-)
-
-// fixVolumePathPrefix does platform specific processing to ensure that if
-// the path being passed in is not in a volume path format, convert it to one.
-func fixVolumePathPrefix(srcPath string) string {
- return srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific. On Linux, we
-// can't use filepath.Join(srcPath,include) because this will clean away
-// a trailing "." or "/" which may be important.
-func getWalkRoot(srcPath string, include string) string {
- return srcPath + string(filepath.Separator) + include
-}
-
-// CanonicalTarNameForPath returns platform-specific filepath
-// to canonical posix-style path for tar archival. p is relative
-// path.
-func CanonicalTarNameForPath(p string) (string, error) {
- return p, nil // already unix-style
-}
-
-// chmodTarEntry is used to adjust the file permissions used in tar header based
-// on the platform the archival is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- return perm // noop for unix as golang APIs provide perm bits correctly
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if ok {
- // Currently go does not fill in the major/minors
- if s.Mode&unix.S_IFBLK != 0 ||
- s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
- hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
- }
- }
-
- return
-}
-
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if ok {
- inode = s.Ino
- }
-
- return
-}
-
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if !ok {
- return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
- }
- return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
-}
-
-func major(device uint64) uint64 {
- return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
- return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if rsystem.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
- mode := uint32(hdr.Mode & 07777)
- switch hdr.Typeflag {
- case tar.TypeBlock:
- mode |= unix.S_IFBLK
- case tar.TypeChar:
- mode |= unix.S_IFCHR
- case tar.TypeFifo:
- mode |= unix.S_IFIFO
- }
-
- return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
- permissionsMask := hdrInfo.Mode()
- if forceMask != nil {
- permissionsMask = *forceMask
- }
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 2f548b661..b9d718b60 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
- hdr.Mode = 0600
+ hdr.Mode = 0
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
@@ -48,12 +48,12 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
- if hdr.Xattrs != nil {
- delete(hdr.Xattrs, getOverlayOpaqueXattrName())
+ if hdr.PAXRecords != nil {
+ delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName())
}
// If there are no lower layers, then it can't have been deleted in this layer.
if len(o.rolayers) == 0 {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
// At this point, we have a directory that's opaque. If it appears in one of the lower
// layers, then it was newly-created here, so it wasn't also deleted here.
@@ -66,7 +66,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
if statErr == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
if isWhiteOut(stat) {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
}
// It's not whiteout, so it was there in the older layer, so we need to
@@ -100,7 +100,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// original directory wasn't inherited into this layer,
// so we don't need to emit whiteout for it.
if isWhiteOut(stat) {
- return nil, nil
+ return nil, nil //nolint: nilnil
}
}
}
@@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
}
// if a file was deleted and we are using overlay, we need to create a character device
- if strings.HasPrefix(base, WhiteoutPrefix) {
- originalBase := base[len(WhiteoutPrefix):]
+ if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
originalPath := filepath.Join(dir, originalBase)
if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
@@ -153,8 +152,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
return true, nil
}
-type directHandler struct {
-}
+type directHandler struct{}
func (d directHandler) Setxattr(path, name string, value []byte) error {
return unix.Setxattr(path, name, value, 0)
@@ -185,7 +183,26 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) {
}
s, ok := f.Sys().(*syscall.Stat_t)
if ok {
- return s.Uid, s.Gid, s.Mode & 07777, nil
+ return s.Uid, s.Gid, s.Mode & 0o7777, nil
}
return 0, 0, uint32(f.Mode()), nil
}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+ permissionsMask := hdrInfo.Mode()
+ if forceMask != nil {
+ permissionsMask = *forceMask
+ }
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, permissionsMask); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, permissionsMask); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
index 4b8834444..b342ff75e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux
package archive
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index 5438700ab..56f2086bc 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!freebsd
+//go:build !windows
package archive
@@ -11,10 +11,34 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
+func init() {
+ sysStatOverride = statUnix
+}
+
+// statUnix populates hdr from system-dependent fields of fi without performing
+// any OS lookups.
+// Adapted from Moby.
+func statUnix(fi os.FileInfo, hdr *tar.Header) error {
+ s, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ hdr.Uid = int(s.Uid)
+ hdr.Gid = int(s.Gid)
+
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
+ }
+
+ return nil
+}
+
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
@@ -50,8 +74,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
- hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert
}
}
@@ -88,12 +112,7 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if rsystem.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
- mode := uint32(hdr.Mode & 07777)
+ mode := uint32(hdr.Mode & 0o7777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
@@ -103,24 +122,15 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode |= unix.S_IFIFO
}
- return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+ return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))
}
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
- permissionsMask := hdrInfo.Mode()
- if forceMask != nil {
- permissionsMask = *forceMask
- }
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
- }
- return nil
+// Hardlink without following symlinks
+func handleLLink(targetPath, path string) error {
+ // Note: on Linux, the link syscall will not follow symlinks.
+ // This behavior is implementation-dependent since
+ // POSIX.1-2008 so to make it clear that we need non-symlink
+ // following here we use the linkat syscall which has a flags
+ // field to select symlink following or not.
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
}
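// A standalone demonstration of why handleLLink goes through linkat(2): with
// flags set to zero it always links the symlink itself, never its target,
// which is exactly the property layer unpacking needs for hardlink entries.
// Illustrative, unix-only sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.MkdirTemp("", "llink")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	link := filepath.Join(dir, "sym")
	if err := os.Symlink(filepath.Join(dir, "dangling"), link); err != nil {
		panic(err)
	}

	// Hard-link the symlink itself, as handleLLink does.
	hard := filepath.Join(dir, "hard")
	if err := unix.Linkat(unix.AT_FDCWD, link, unix.AT_FDCWD, hard, 0); err != nil {
		panic(err)
	}

	fi, err := os.Lstat(hard)
	if err != nil {
		panic(err)
	}
	fmt.Println("new link is a symlink:", fi.Mode()&os.ModeSymlink != 0) // true
}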
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index a0872444f..6db31cf4c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package archive
@@ -34,21 +34,20 @@ func CanonicalTarNameForPath(p string) (string, error) {
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
- return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
- //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
- permPart |= 0111
- permPart &= 0755
+ permPart |= 0o111
+ permPart &= 0o755
return noPermPart | permPart
}
@@ -77,3 +76,8 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
// no notion of file ownership mapping yet on Windows
return idtools.IDPair{0, 0}, nil
}
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+ return os.Link(targetPath, path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index c7bb25d0f..3075c27bb 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -5,7 +5,7 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
+ "maps"
"os"
"path/filepath"
"reflect"
@@ -14,6 +14,7 @@ import (
"syscall"
"time"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
@@ -57,7 +58,7 @@ func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
-// for sort.Sort
+// changesByPath implements sort.Interface.
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
@@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
- if strings.HasPrefix(f, WhiteoutPrefix) {
- originalFile := f[len(WhiteoutPrefix):]
+ if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
return filepath.Join(filepath.Dir(path), originalFile), nil
}
@@ -107,7 +107,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
func aufsWhiteoutPresent(root, path string) (bool, error) {
f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path))
- _, err := os.Stat(filepath.Join(root, f))
+ err := fileutils.Exists(filepath.Join(root, f))
if err == nil {
return true, nil
}
@@ -132,9 +132,11 @@ func isENOTDIR(err error) bool {
return false
}
-type skipChange func(string) (bool, error)
-type deleteChange func(string, string, os.FileInfo) (string, error)
-type whiteoutChange func(string, string) (bool, error)
+type (
+ skipChange func(string) (bool, error)
+ deleteChange func(string, string, os.FileInfo) (string, error)
+ whiteoutChange func(string, string) (bool, error)
+)
func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
var (
@@ -300,7 +302,6 @@ func (info *FileInfo) path() string {
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
-
sizeAtEntry := len(*changes)
if oldInfo == nil {
@@ -318,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
- for k, v := range oldInfo.children {
- oldChildren[k] = v
- }
+ maps.Copy(oldChildren, oldInfo.children)
}
for name, newChild := range info.children {
@@ -374,7 +373,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
-
}
// Changes add changes to file information.
@@ -399,11 +397,9 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) {
- var (
- oldRoot, newRoot *FileInfo
- )
+ var oldRoot, newRoot *FileInfo
if oldDir == "" {
- emptyDir, err := ioutil.TempDir("", "empty")
+ emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
index a3addebe6..dc308120d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -30,8 +30,8 @@ type walker struct {
dir2 string
root1 *FileInfo
root2 *FileInfo
- idmap1 *idtools.IDMappings
- idmap2 *idtools.IDMappings
+ idmap1 *idtools.IDMappings //nolint:unused
+ idmap2 *idtools.IDMappings //nolint:unused
}
// collectFileInfoForChanges returns a complete representation of the trees
@@ -316,7 +316,11 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
dc := func(root, path string, fi os.FileInfo) (string, error) {
- return overlayDeletedFile(layers, root, path, fi)
+ r, err := overlayDeletedFile(layers, root, path, fi)
+ if err != nil {
+ return "", fmt.Errorf("overlay deleted file query: %w", err)
+ }
+ return r, nil
}
return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout)
}
@@ -351,7 +355,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
// If the directory isn't marked as opaque, then it's just a normal directory.
opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName())
if err != nil {
- return "", err
+ return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err)
}
if len(opaque) != 1 || opaque[0] != 'y' {
return "", err
@@ -397,5 +401,4 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
// We didn't find the same path in any older layers, so it was new in this one.
return "", nil
-
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
index bbbd8c9de..2965ccc9f 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -1,9 +1,10 @@
-// +build !linux
+//go:build !linux
package archive
import (
"fmt"
+ "io/fs"
"os"
"path/filepath"
"runtime"
@@ -29,7 +30,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
}()
// block until both routines have returned
- for i := 0; i < 2; i++ {
+ for range 2 {
if err := <-errs; err != nil {
return nil, nil, err
}
@@ -41,7 +42,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
root := newRootFileInfo(idMappings)
- err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ sourceStat, err := system.Lstat(sourceDir)
+ if err != nil {
+ return nil, err
+ }
+
+ err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -84,8 +90,15 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
if err != nil {
return err
}
- info.stat = s
+ // Don't cross mount points. This ignores file mounts to avoid
+ // generating a diff which deletes all files following the
+ // mount.
+ if s.Dev() != sourceStat.Dev() && s.IsDir() {
+ return filepath.SkipDir
+ }
+
+ info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
index 1cc1910f8..fb2cb70c2 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package archive
@@ -29,9 +29,10 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
if oldStat.Mode() != newStat.Mode() ||
ownerChanged ||
oldStat.Rdev() != newStat.Rdev() ||
+ oldStat.Flags() != newStat.Flags() ||
+ !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
// Don't look at size for dirs, it's not a good measure of change
- (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
- (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ ((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) {
return true
}
return false
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
index 966400e59..1bab94fa5 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -7,7 +7,6 @@ import (
)
func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
-
// Don't look at size for dirs, it's not a good measure of change
if oldStat.Mtim() != newStat.Mtim() ||
oldStat.Mode() != newStat.Mode() ||
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
index 6298a674d..4d46167d7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -4,11 +4,11 @@ import (
"archive/tar"
"errors"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/sirupsen/logrus"
)
@@ -95,7 +95,7 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
- if _, err = os.Lstat(sourcePath); err != nil {
+ if err = fileutils.Lexists(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
@@ -255,7 +255,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
- return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ return dstInfo.Path, io.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
@@ -298,7 +298,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
-
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
index e305b5e4a..f57928244 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package archive
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index 14ffad5c0..ceaa8b0b7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -4,12 +4,13 @@ import (
"archive/tar"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
"strings"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
@@ -84,8 +85,8 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0600)
+ if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+ err = os.MkdirAll(parentPath, 0o755)
if err != nil {
return 0, err
}
@@ -101,7 +102,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
- if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+ if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
@@ -130,11 +131,11 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
- _, err := os.Lstat(dir)
+ err := fileutils.Lexists(dir)
if err != nil {
return 0, err
}
- err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
@@ -145,6 +146,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return nil
}
if _, exists := unpackedPaths[path]; !exists {
+ if err := resetImmutable(path, nil); err != nil {
+ return err
+ }
err := os.RemoveAll(path)
return err
}
@@ -156,6 +160,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
+ if err := resetImmutable(originalPath, nil); err != nil {
+ return 0, err
+ }
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
@@ -165,7 +172,15 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
+ //
+ // We always reset the immutable flag (if present) to allow metadata
+ // changes and to allow directory modification. The flag will be
+ // re-applied based on the contents of hdr either at the end for
+ // directories or in createTarFile otherwise.
if fi, err := os.Lstat(path); err == nil {
+ if err := resetImmutable(path, &fi); err != nil {
+ return 0, err
+ }
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
@@ -183,7 +198,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
- return 0, fmt.Errorf("Invalid aufs hardlink")
+ return 0, fmt.Errorf("invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
@@ -215,6 +230,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
+ if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+ return 0, err
+ }
}
return size, nil
@@ -245,7 +263,9 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp
if err != nil {
return 0, err
}
- defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+ defer func() {
+ _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
+ }()
if decompress {
layer, err = DecompressStream(layer)
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
new file mode 100644
index 000000000..4da1ced3e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
@@ -0,0 +1,166 @@
+//go:build freebsd
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "math/bits"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/containers/storage/pkg/system"
+)
+
+const (
+ paxSCHILYFflags = "SCHILY.fflags"
+)
+
+var (
+ flagNameToValue = map[string]uint32{
+ "sappnd": system.SF_APPEND,
+ "sappend": system.SF_APPEND,
+ "arch": system.SF_ARCHIVED,
+ "archived": system.SF_ARCHIVED,
+ "schg": system.SF_IMMUTABLE,
+ "schange": system.SF_IMMUTABLE,
+ "simmutable": system.SF_IMMUTABLE,
+ "sunlnk": system.SF_NOUNLINK,
+ "sunlink": system.SF_NOUNLINK,
+ "snapshot": system.SF_SNAPSHOT,
+ "uappnd": system.UF_APPEND,
+ "uappend": system.UF_APPEND,
+ "uarch": system.UF_ARCHIVE,
+ "uarchive": system.UF_ARCHIVE,
+ "hidden": system.UF_HIDDEN,
+ "uhidden": system.UF_HIDDEN,
+ "uchg": system.UF_IMMUTABLE,
+ "uchange": system.UF_IMMUTABLE,
+ "uimmutable": system.UF_IMMUTABLE,
+ "uunlnk": system.UF_NOUNLINK,
+ "uunlink": system.UF_NOUNLINK,
+ "offline": system.UF_OFFLINE,
+ "uoffline": system.UF_OFFLINE,
+ "opaque": system.UF_OPAQUE,
+ "rdonly": system.UF_READONLY,
+ "urdonly": system.UF_READONLY,
+ "readonly": system.UF_READONLY,
+ "ureadonly": system.UF_READONLY,
+ "reparse": system.UF_REPARSE,
+ "ureparse": system.UF_REPARSE,
+ "sparse": system.UF_SPARSE,
+ "usparse": system.UF_SPARSE,
+ "system": system.UF_SYSTEM,
+ "usystem": system.UF_SYSTEM,
+ }
+ // Only include the short names for the reverse map
+ flagValueToName = map[uint32]string{
+ system.SF_APPEND: "sappnd",
+ system.SF_ARCHIVED: "arch",
+ system.SF_IMMUTABLE: "schg",
+ system.SF_NOUNLINK: "sunlnk",
+ system.SF_SNAPSHOT: "snapshot",
+ system.UF_APPEND: "uappnd",
+ system.UF_ARCHIVE: "uarch",
+ system.UF_HIDDEN: "hidden",
+ system.UF_IMMUTABLE: "uchg",
+ system.UF_NOUNLINK: "uunlnk",
+ system.UF_OFFLINE: "offline",
+ system.UF_OPAQUE: "opaque",
+ system.UF_READONLY: "rdonly",
+ system.UF_REPARSE: "reparse",
+ system.UF_SPARSE: "sparse",
+ system.UF_SYSTEM: "system",
+ }
+)
+
+func parseFileFlags(fflags string) (uint32, uint32, error) {
+ var set, clear uint32 = 0, 0
+ for _, fflag := range strings.Split(fflags, ",") {
+ isClear := false
+ if clean, ok := strings.CutPrefix(fflag, "no"); ok {
+ isClear = true
+ fflag = clean
+ }
+ if value, ok := flagNameToValue[fflag]; ok {
+ if isClear {
+ clear |= value
+ } else {
+ set |= value
+ }
+ } else {
+ return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag)
+ }
+ }
+ return set, clear, nil
+}
+
+func formatFileFlags(fflags uint32) (string, error) {
+ res := []string{}
+ for fflags != 0 {
+ // Extract lowest set bit
+ fflag := uint32(1) << bits.TrailingZeros32(fflags)
+ if name, ok := flagValueToName[fflag]; ok {
+ res = append(res, name)
+ } else {
+ return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag)
+ }
+ fflags &= ^fflag
+ }
+ return strings.Join(res, ","), nil
+}
+
+func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error {
+ st, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ fflags, err := formatFileFlags(st.Flags())
+ if err != nil {
+ return err
+ }
+ if fflags != "" {
+ if hdr.PAXRecords == nil {
+ hdr.PAXRecords = map[string]string{}
+ }
+ hdr.PAXRecords[paxSCHILYFflags] = fflags
+ }
+ return nil
+}
+
+func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error {
+ if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok {
+ var set, clear uint32
+ set, clear, err := parseFileFlags(fflags)
+ if err != nil {
+ return err
+ }
+
+ // Apply the delta to the existing file flags
+ st, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ return system.Lchflags(path, (st.Flags() & ^clear)|set)
+ }
+ return nil
+}
+
+func resetImmutable(path string, fi *os.FileInfo) error {
+ var flags uint32
+ if fi != nil {
+ flags = (*fi).Sys().(*syscall.Stat_t).Flags
+ } else {
+ st, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ flags = st.Flags()
+ }
+ if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 {
+ flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE)
+ return system.Lchflags(path, flags)
+ }
+ return nil
+}
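
A minimal, platform-independent sketch of the SCHILY.fflags convention the file above implements: flag names are comma-separated, and a "no" prefix marks a flag to clear rather than set. The two-entry table and its values are illustrative stand-ins for the full FreeBSD tables in fflags_bsd.go.

package main

import (
	"fmt"
	"strings"
)

// Toy flag table; the real code maps dozens of names to system.* constants.
var toyFlags = map[string]uint32{
	"uchg":   0x2,    // stand-in for UF_IMMUTABLE
	"hidden": 0x8000, // stand-in for UF_HIDDEN
}

// parse splits e.g. "uchg,nohidden" into a set mask and a clear mask,
// mirroring parseFileFlags above.
func parse(s string) (set, clear uint32, err error) {
	for _, tok := range strings.Split(s, ",") {
		isClear := false
		if rest, ok := strings.CutPrefix(tok, "no"); ok {
			isClear, tok = true, rest
		}
		v, ok := toyFlags[tok]
		if !ok {
			return 0, 0, fmt.Errorf("unrecognised token: %s", tok)
		}
		if isClear {
			clear |= v
		} else {
			set |= v
		}
	}
	return set, clear, nil
}

func main() {
	set, clear, err := parse("uchg,nohidden")
	fmt.Printf("set=%#x clear=%#x err=%v\n", set, clear, err) // set=0x2 clear=0x8000 err=<nil>
}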
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
new file mode 100644
index 000000000..5ad480f7c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
@@ -0,0 +1,20 @@
+//go:build !freebsd
+
+package archive
+
+import (
+ "archive/tar"
+ "os"
+)
+
+func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error {
+ return nil
+}
+
+func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error {
+ return nil
+}
+
+func resetImmutable(path string, fi *os.FileInfo) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/filter.go b/vendor/github.com/containers/storage/pkg/archive/filter.go
new file mode 100644
index 000000000..9902a1ef5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/filter.go
@@ -0,0 +1,73 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os/exec"
+ "strings"
+ "sync"
+)
+
+var filterPath sync.Map
+
+func getFilterPath(name string) string {
+ path, ok := filterPath.Load(name)
+ if ok {
+ return path.(string)
+ }
+
+ path, err := exec.LookPath(name)
+ if err != nil {
+ path = ""
+ }
+
+ filterPath.Store(name, path)
+ return path.(string)
+}
+
+type errorRecordingReader struct {
+ r io.Reader
+ err error
+}
+
+func (r *errorRecordingReader) Read(p []byte) (int, error) {
+ n, err := r.r.Read(p)
+ if r.err == nil && err != io.EOF {
+ r.err = err
+ }
+ return n, err
+}
+
+// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout.
+// cleanup() is a caller provided function that will be called when the command finishes running, regardless of
+// whether it succeeds or fails.
+// If the command is not found, it returns (nil, false) and the cleanup function is not called.
+func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) {
+ path := getFilterPath(args[0])
+ if path == "" {
+ return nil, false
+ }
+
+ var stderrBuf bytes.Buffer
+
+ inputWithError := &errorRecordingReader{r: input}
+
+ r, w := io.Pipe()
+ cmd := exec.Command(path, args[1:]...)
+ cmd.Stdin = inputWithError
+ cmd.Stdout = w
+ cmd.Stderr = &stderrBuf
+ go func() {
+ err := cmd.Run()
+ // if there is an error reading from input, prefer to return that error
+ if inputWithError.err != nil {
+ err = inputWithError.err
+ } else if err != nil && stderrBuf.Len() > 0 {
+ err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err)
+ }
+ w.CloseWithError(err) // CloseWithErr(nil) == Close()
+ cleanup()
+ }()
+ return r, true
+}
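
The wiring above generalizes to any external filter: stream the input through the command and hand its stdout back as an io.ReadCloser, delivering failures to the reader via CloseWithError. A standalone sketch of the pattern (the demo assumes a POSIX tr binary in PATH):

package main

import (
	"fmt"
	"io"
	"os/exec"
	"strings"
)

// runFilter pipes input through the given command and returns its stdout.
func runFilter(args []string, input io.Reader) io.ReadCloser {
	r, w := io.Pipe()
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdin = input
	cmd.Stdout = w
	go func() {
		// CloseWithError(nil) behaves like Close(), so readers see a clean
		// EOF on success and the command's error otherwise.
		w.CloseWithError(cmd.Run())
	}()
	return r
}

func main() {
	out := runFilter([]string{"tr", "a-z", "A-Z"}, strings.NewReader("hello\n"))
	defer out.Close()
	b, err := io.ReadAll(out)
	fmt.Printf("%s err=%v\n", b, err) // HELLO err=<nil>
}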
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
index e85aac054..3555d753a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux
package archive
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
index b39d12c87..903befd76 100644
--- a/vendor/github.com/containers/storage/pkg/archive/wrap.go
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -17,8 +17,8 @@ import (
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
-// * ./foo.txt with content "hello world"
-// * ./empty with empty content
+// - ./foo.txt with content "hello world"
+// - ./empty with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index aacfee76f..710167d24 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -4,26 +4,16 @@ import (
stdtar "archive/tar"
"fmt"
"io"
- "io/ioutil"
- "net"
"os"
- "os/user"
"path/filepath"
"sync"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/unshare"
)
-func init() {
- // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
- // environment not in the chroot from untrusted files.
- _, _ = user.Lookup("storage")
- _, _ = net.LookupHost("localhost")
-}
-
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
archiver := archive.NewArchiver(idMappings)
@@ -41,7 +31,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true, dest)
}
@@ -72,11 +63,11 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
if options == nil {
options = &archive.TarOptions{}
- options.InUserNS = rsystem.RunningInUserNS()
+ options.InUserNS = unshare.IsRootless()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
@@ -86,13 +77,19 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
rootIDs := idMappings.RootPair()
dest = filepath.Clean(dest)
- if _, err := os.Stat(dest); os.IsNotExist(err) {
- if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+ if err := fileutils.Exists(dest); os.IsNotExist(err) {
+ if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
return err
}
}
- r := ioutil.NopCloser(tarArchive)
+ destVal, err := newUnpackDestination(root, dest)
+ if err != nil {
+ return err
+ }
+ defer destVal.Close()
+
+ r := tarArchive
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
@@ -102,7 +99,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
r = decompressedArchive
}
- return invokeUnpack(r, dest, options, root)
+ return invokeUnpack(r, destVal, options)
}
// Tar tars the requested path while chrooted to the specified root.
@@ -124,7 +121,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -143,11 +140,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ if err == nil && hashError != nil {
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
new file mode 100644
index 000000000..caf348493
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -0,0 +1,34 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+type unpackDestination struct {
+ dest string
+}
+
+func (dst *unpackDestination) Close() error {
+ return nil
+}
+
+// newUnpackDestination is a no-op on this platform
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
+ return &unpackDestination{
+ dest: dest,
+ }, nil
+}
+
+func invokeUnpack(decompressedArchive io.Reader,
+ dest *unpackDestination,
+ options *archive.TarOptions,
+) error {
+ return archive.Unpack(decompressedArchive, dest.dest, options)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ _ = root // Restricting the operation to this root is not implemented on macOS
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 9da10fe33..88df9e56f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -1,23 +1,48 @@
-// +build !windows
+//go:build !windows && !darwin
package chrootarchive
import (
"bytes"
+ "errors"
"flag"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
"strings"
+ "golang.org/x/sys/unix"
+
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
)
+type unpackDestination struct {
+ root *os.File
+ dest string
+}
+
+func (dst *unpackDestination) Close() error {
+ return dst.root.Close()
+}
+
+// tarOptionsDescriptor is passed as an extra file
+const tarOptionsDescriptor = 3
+
+// rootFileDescriptor is passed as an extra file
+const rootFileDescriptor = 4
+
+// procPathForFd gives us a string for a descriptor.
+// Note that while Linux supports actually *reading* this
+// path, FreeBSD and other platforms don't; but in this codebase
+// we only compare strings.
+func procPathForFd(fd int) string {
+ return fmt.Sprintf("/proc/self/fd/%d", fd)
+}
+
// untar is the entry-point for storage-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and rexec.
@@ -27,8 +52,8 @@ func untar() {
var options archive.TarOptions
- //read the options from the pipe "ExtraFiles"
- if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+ // read the options from the pipe "ExtraFiles"
+ if err := json.NewDecoder(os.NewFile(tarOptionsDescriptor, "options")).Decode(&options); err != nil {
fatal(err)
}
@@ -38,7 +63,17 @@ func untar() {
root = flag.Arg(1)
}
- if root == "" {
+ // FreeBSD doesn't have proc/self, but we can handle it here
+ if root == procPathForFd(rootFileDescriptor) {
+ // Take ownership to ensure it's closed; no need to leak
+ // this afterwards.
+ rootFd := os.NewFile(rootFileDescriptor, "tar-root")
+ defer rootFd.Close()
+ if err := unix.Fchdir(int(rootFd.Fd())); err != nil {
+ fatal(err)
+ }
+ root = "."
+ } else if root == "" {
root = dst
}
@@ -57,11 +92,35 @@ func untar() {
os.Exit(0)
}
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+// newUnpackDestination takes a root directory and a destination which
+// must be underneath it, and returns an object that can unpack
+// in the target root using a file descriptor.
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
if root == "" {
- return errors.New("must specify a root to chroot to")
+ return nil, errors.New("must specify a root to chroot to")
+ }
+ relDest, err := filepath.Rel(root, dest)
+ if err != nil {
+ return nil, err
+ }
+ if relDest == "." {
+ relDest = "/"
+ }
+ if relDest[0] != '/' {
+ relDest = "/" + relDest
}
+ rootfdRaw, err := unix.Open(root, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, &fs.PathError{Op: "open", Path: root, Err: err}
+ }
+ return &unpackDestination{
+ root: os.NewFile(uintptr(rootfdRaw), "rootfs"),
+ dest: relDest,
+ }, nil
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest *unpackDestination, options *archive.TarOptions) error {
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
// when the full image list is passed (e.g. when this is used by
@@ -69,50 +128,42 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// child
r, w, err := os.Pipe()
if err != nil {
- return fmt.Errorf("Untar pipe failure: %v", err)
- }
-
- if root != "" {
- relDest, err := filepath.Rel(root, dest)
- if err != nil {
- return err
- }
- if relDest == "." {
- relDest = "/"
- }
- if relDest[0] != '/' {
- relDest = "/" + relDest
- }
- dest = relDest
+ return fmt.Errorf("untar pipe failure: %w", err)
}
- cmd := reexec.Command("storage-untar", dest, root)
+ cmd := reexec.Command("storage-untar", dest.dest, procPathForFd(rootFileDescriptor))
cmd.Stdin = decompressedArchive
- cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+ // If you change this, change tarOptionsDescriptor above
+ cmd.ExtraFiles = append(cmd.ExtraFiles, r) // fd 3
+ // If you change this, change rootFileDescriptor above too
+ cmd.ExtraFiles = append(cmd.ExtraFiles, dest.root) // fd 4
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+ return fmt.Errorf("untar error on re-exec cmd: %w", err)
}
- //write the options to the pipe for the untar exec to read
+ // write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+ return fmt.Errorf("untar json encode to pipe failed: %w", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
+ errorOut := fmt.Errorf("unpacking failed (error: %w; output: %s)", err, output)
// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
- io.Copy(ioutil.Discard, decompressedArchive)
+ if _, err := io.Copy(io.Discard, decompressedArchive); err != nil {
+ return fmt.Errorf("%w\nexhausting input failed (error: %w)", errorOut, err)
+ }
- return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+ return errorOut
}
return nil
}
@@ -184,22 +235,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re
stdin, err := cmd.StdinPipe()
if err != nil {
- return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ return nil, fmt.Errorf("getting options pipe for tar process: %w", err)
}
if err := cmd.Start(); err != nil {
- return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ return nil, fmt.Errorf("tar error on re-exec cmd: %w", err)
}
go func() {
err := cmd.Wait()
- err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ if err != nil {
+ err = fmt.Errorf("processing tar file(%s): %w", errBuff, err)
+ }
tarW.CloseWithError(err)
}()
if err := json.NewEncoder(stdin).Encode(options); err != nil {
stdin.Close()
- return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ return nil, fmt.Errorf("tar json encode to pipe failed: %w", err)
}
stdin.Close()
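
The descriptor plumbing above is easy to get wrong, so here is a standalone, Unix-only sketch of the same trick: the parent opens a directory, passes it as fd 4 via ExtraFiles (fd 3 is a placeholder, mirroring tarOptionsDescriptor and rootFileDescriptor), and the re-exec'ed child recovers it with os.NewFile and chdirs into it with Fchdir. Both roles live in one binary, selected by an environment variable:

package main

import (
	"fmt"
	"os"
	"os/exec"

	"golang.org/x/sys/unix"
)

func main() {
	if os.Getenv("AS_CHILD") == "1" {
		// ExtraFiles numbering starts at 3; the directory was passed as fd 4.
		root := os.NewFile(4, "root")
		defer root.Close()
		if err := unix.Fchdir(int(root.Fd())); err != nil {
			panic(err)
		}
		wd, _ := os.Getwd()
		fmt.Println("child cwd:", wd)
		return
	}
	devnull, err := os.Open(os.DevNull) // placeholder occupying fd 3
	if err != nil {
		panic(err)
	}
	defer devnull.Close()
	dir, err := os.Open("/tmp")
	if err != nil {
		panic(err)
	}
	defer dir.Close()
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "AS_CHILD=1")
	cmd.ExtraFiles = []*os.File{devnull, dir} // fd 3, fd 4
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}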
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
index 8a5c680b1..6611cbade 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -7,18 +7,34 @@ import (
"github.com/containers/storage/pkg/longpath"
)
+type unpackDestination struct {
+ dest string
+}
+
+func (dst *unpackDestination) Close() error {
+ return nil
+}
+
+// newUnpackDestination is a no-op on this platform
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
+ return &unpackDestination{
+ dest: dest,
+ }, nil
+}
+
// chroot is not supported by Windows
func chroot(path string) error {
return nil
}
-func invokeUnpack(decompressedArchive io.ReadCloser,
- dest string,
- options *archive.TarOptions, root string) error {
+func invokeUnpack(decompressedArchive io.Reader,
+ dest *unpackDestination,
+ options *archive.TarOptions,
+) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
- return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest.dest), options)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 76c94c6c1..3ca99a2c2 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -2,12 +2,13 @@ package chrootarchive
import (
"fmt"
- "io/ioutil"
+ "net"
"os"
+ "os/user"
"path/filepath"
"github.com/containers/storage/pkg/mount"
- "github.com/syndtr/gocapability/capability"
+ "github.com/moby/sys/capability"
"golang.org/x/sys/unix"
)
@@ -18,10 +19,18 @@ import (
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
- caps, err := capability.NewPid(0)
+ caps, err := capability.NewPid2(0)
if err != nil {
return err
}
+ if err := caps.Load(); err != nil {
+ return err
+ }
+
+ // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+ // environment not in the chroot from untrusted files.
+ _, _ = user.Lookup("storage")
+ _, _ = net.LookupHost("localhost")
// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
@@ -29,7 +38,7 @@ func chroot(path string) (err error) {
}
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
- return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+ return fmt.Errorf("creating mount namespace before pivot: %w", err)
}
// make everything in new ns private
@@ -44,9 +53,9 @@ func chroot(path string) (err error) {
}
// setup oldRoot for pivot_root
- pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+ pivotDir, err := os.MkdirTemp(path, ".pivot_root")
if err != nil {
- return fmt.Errorf("Error setting up pivot dir: %v", err)
+ return fmt.Errorf("setting up pivot dir: %w", err)
}
var mounted bool
@@ -65,7 +74,7 @@ func chroot(path string) (err error) {
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
- errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+ errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup)
if err == nil {
err = errCleanup
}
@@ -75,7 +84,7 @@ func chroot(path string) (err error) {
if err := unix.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
- return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+ return fmt.Errorf("cleaning up after failed pivot: %w", err)
}
return realChroot(path)
}
@@ -86,17 +95,17 @@ func chroot(path string) (err error) {
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root: %v", err)
+ return fmt.Errorf("changing to new root: %w", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
- return fmt.Errorf("Error making old root private after pivot: %v", err)
+ return fmt.Errorf("making old root private after pivot: %w", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
- return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+ return fmt.Errorf("while unmounting old root after pivot: %w", err)
}
mounted = false
@@ -105,10 +114,10 @@ func chroot(path string) (err error) {
func realChroot(path string) error {
if err := unix.Chroot(path); err != nil {
- return fmt.Errorf("Error after fallback to chroot: %v", err)
+ return fmt.Errorf("after fallback to chroot: %w", err)
}
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root after chroot: %v", err)
+ return fmt.Errorf("changing to new root after chroot: %w", err)
}
return nil
}
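
The capability gate at the top of chroot() is worth seeing in isolation: with github.com/moby/sys/capability (which replaces syndtr/gocapability in this update), NewPid2 no longer reads the process state implicitly, hence the explicit Load() added above. A Linux-only sketch:

package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	caps, err := capability.NewPid2(0) // 0 means the current process
	if err != nil {
		panic(err)
	}
	if err := caps.Load(); err != nil { // NewPid2 does not load state by itself
		panic(err)
	}
	canAdmin := caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
	canChroot := caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT)
	// chroot() above uses pivot_root only with CAP_SYS_ADMIN and falls back
	// to plain chroot(2) when only CAP_SYS_CHROOT is held.
	fmt.Printf("CAP_SYS_ADMIN=%v CAP_SYS_CHROOT=%v\n", canAdmin, canChroot)
}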
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
index 83278ee50..0aab11d22 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!linux
+//go:build !windows && !linux && !darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
new file mode 100644
index 000000000..7b4ea9e51
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
@@ -0,0 +1,40 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err)
+ }
+
+ s, err := archive.UnpackLayer(dest, layer, options)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err)
+ }
+
+ return s, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index c884d3784..ee215ce4f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -1,4 +1,4 @@
-//+build !windows
+//go:build !windows && !darwin
package chrootarchive
@@ -7,7 +7,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -15,7 +14,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/containers/storage/pkg/unshare"
)
type applyLayerResponse struct {
@@ -26,7 +25,6 @@ type applyLayerResponse struct {
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and rexec.
func applyLayer() {
-
var (
tmpDir string
err error
@@ -35,17 +33,19 @@ func applyLayer() {
runtime.LockOSThread()
flag.Parse()
- inUserns := rsystem.RunningInUserNS()
+ inUserns := unshare.IsRootless()
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
// We need to be able to set any perms
- oldmask, err := system.Umask(0)
- defer system.Umask(oldmask)
+ oldMask, err := system.Umask(0)
if err != nil {
fatal(err)
}
+ defer func() {
+ _, _ = system.Umask(oldMask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
+ }()
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)
@@ -55,7 +55,7 @@ func applyLayer() {
options.InUserNS = true
}
- if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
+ if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil {
fatal(err)
}
@@ -68,7 +68,7 @@ func applyLayer() {
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
- fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+ fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
}
if _, err := flush(os.Stdin); err != nil {
@@ -94,7 +94,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
- if rsystem.RunningInUserNS() {
+ if unshare.IsRootless() {
options.InUserNS = true
}
}
@@ -104,25 +104,25 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
data, err := json.Marshal(options)
if err != nil {
- return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+ return 0, fmt.Errorf("ApplyLayer json encode: %w", err)
}
cmd := reexec.Command("storage-applyLayer", dest)
cmd.Stdin = layer
- cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+ cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
- return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+ return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
- return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+ return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err)
}
return response.LayerSize, nil
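
The parent/child exchange above is a one-shot JSON protocol: the re-exec'ed child writes a single applyLayerResponse object to stdout and the parent decodes it from the captured buffer. A sketch of just the protocol, with the process boundary replaced by a bytes.Buffer (the json field tag here is illustrative, not copied from the vendored type):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"` // illustrative tag; see the real type above
}

func main() {
	var stdout bytes.Buffer

	// Child side: report the unpacked size as the only stdout output.
	if err := json.NewEncoder(&stdout).Encode(applyLayerResponse{LayerSize: 4096}); err != nil {
		panic(err)
	}

	// Parent side: decode the response from the captured stdout.
	var resp applyLayerResponse
	if err := json.NewDecoder(&stdout).Decode(&resp); err != nil {
		panic(err)
	}
	fmt.Println("layer size:", resp.LayerSize) // layer size: 4096
}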
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
index 8f8e88bfb..8bfff5d65 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
@@ -3,7 +3,6 @@ package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
layer = decompressed
}
- tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
new file mode 100644
index 000000000..fa17c9bf8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index ea08135e4..0fd96190a 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -1,11 +1,10 @@
-// +build !windows
+//go:build !windows && !darwin
package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/containers/storage/pkg/reexec"
@@ -25,5 +24,5 @@ func fatal(err error) {
// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
- return io.Copy(ioutil.Discard, r)
+ return io.Copy(io.Discard, r)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
index 63f970456..2245f894e 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
@@ -1,3 +1,5 @@
+//go:build !windows && !darwin
+
package chrootarchive
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go
new file mode 100644
index 000000000..09e75680a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go
@@ -0,0 +1,96 @@
+package chunked
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "io"
+
+ "github.com/docker/go-units"
+)
+
+const bloomFilterMaxLength = 100 * units.MB // max size for bloom filter
+
+type bloomFilter struct {
+ bitArray []uint64
+ k uint32
+}
+
+func newBloomFilter(size int, k uint32) *bloomFilter {
+ numElements := (size + 63) / 64
+ if numElements == 0 {
+ numElements = 1
+ }
+ return &bloomFilter{
+ bitArray: make([]uint64, numElements),
+ k: k,
+ }
+}
+
+func newBloomFilterFromArray(bitArray []uint64, k uint32) *bloomFilter {
+ return &bloomFilter{
+ bitArray: bitArray,
+ k: k,
+ }
+}
+
+func (bf *bloomFilter) hashFn(item []byte, seed uint32) (uint64, uint64) {
+ if len(item) == 0 {
+ return 0, 1
+ }
+ mod := uint32(len(bf.bitArray) * 64)
+ seedSplit := seed % uint32(len(item))
+ hash := (crc32.ChecksumIEEE(item[:seedSplit]) ^ crc32.ChecksumIEEE(item[seedSplit:])) % mod
+ return uint64(hash / 64), uint64(1 << (hash % 64))
+}
+
+func (bf *bloomFilter) add(item []byte) {
+ for i := uint32(0); i < bf.k; i++ {
+ index, mask := bf.hashFn(item, i)
+ bf.bitArray[index] |= mask
+ }
+}
+
+func (bf *bloomFilter) maybeContains(item []byte) bool {
+ for i := uint32(0); i < bf.k; i++ {
+ index, mask := bf.hashFn(item, i)
+ if bf.bitArray[index]&mask == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bf *bloomFilter) writeTo(writer io.Writer) error {
+ if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
+ return err
+ }
+ if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
+ return err
+ }
+ if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readBloomFilter(reader io.Reader) (*bloomFilter, error) {
+ var bloomFilterLen uint64
+ var k uint32
+
+ if err := binary.Read(reader, binary.LittleEndian, &bloomFilterLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(reader, binary.LittleEndian, &k); err != nil {
+ return nil, err
+ }
+ // sanity check
+ if bloomFilterLen > bloomFilterMaxLength {
+ return nil, fmt.Errorf("bloom filter length %d exceeds max length %d", bloomFilterLen, bloomFilterMaxLength)
+ }
+ bloomFilterArray := make([]uint64, bloomFilterLen)
+ if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil {
+ return nil, err
+ }
+ return newBloomFilterFromArray(bloomFilterArray, k), nil
+}
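
A quick usage sketch of the filter above. Everything here is unexported, so this would have to live inside package chunked (in a test, say); sizing the bit array at roughly 10 bits per entry with k = 3, as the cache code further down does, gives an expected false-positive rate of about 1.7%:

func exampleBloom() {
	digests := [][]byte{[]byte("sha256:aaa"), []byte("sha256:bbb")}
	bf := newBloomFilter(len(digests)*10, 3) // ~10 bits per entry, 3 hashes
	for _, d := range digests {
		bf.add(d)
	}
	fmt.Println(bf.maybeContains([]byte("sha256:aaa"))) // true
	fmt.Println(bf.maybeContains([]byte("sha256:zzz"))) // false (or, rarely, a false positive)
}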
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
new file mode 100644
index 000000000..47742b05d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -0,0 +1,958 @@
+package chunked
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ storage "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/docker/go-units"
+ jsoniter "github.com/json-iterator/go"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ cacheKey = "chunked-manifest-cache"
+ cacheVersion = 3
+
+ digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+ // Using 3 hash functions and n/m = 10 gives a false positive rate of ~1.7%:
+ // https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
+ bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries
+ bloomFilterHashes = 3 // number of hash functions for the bloom filter
+
+ maxTagsLen = 100 * units.MB // max size for tags len
+)
+
+type cacheFile struct {
+ tagLen int
+ digestLen int
+ fnamesLen int
+ tags []byte
+ vdata []byte
+ fnames []byte
+ bloomFilter *bloomFilter
+}
+
+type layer struct {
+ id string
+ cacheFile *cacheFile
+ target string
+ // mmapBuffer is nil when the cache file is fully loaded in memory.
+ // Otherwise it points to a mmap'ed buffer that is referenced by cacheFile.vdata.
+ mmapBuffer []byte
+
+ // reloadWithMmap is set when the current process generates the cache file,
+ // and cacheFile reuses the memory buffer used by the generation function.
+ // Next time the layer cache is used, attempt to reload the file using
+ // mmap.
+ reloadWithMmap bool
+}
+
+type layersCache struct {
+ layers []*layer
+ refs int
+ store storage.Store
+ mutex sync.RWMutex
+}
+
+var (
+ cacheMutex sync.Mutex
+ cache *layersCache
+)
+
+func (c *layer) release() {
+ runtime.SetFinalizer(c, nil)
+ if c.mmapBuffer != nil {
+ if err := unix.Munmap(c.mmapBuffer); err != nil {
+ logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
+ }
+ c.mmapBuffer = nil
+ }
+}
+
+func layerFinalizer(c *layer) {
+ c.release()
+}
+
+func (c *layersCache) release() {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
+ c.refs--
+ if c.refs != 0 {
+ return
+ }
+ for _, l := range c.layers {
+ l.release()
+ }
+ cache = nil
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+ if cache != nil && cache.store == store {
+ cache.refs++
+ return cache
+ }
+ cache = &layersCache{
+ store: store,
+ refs: 1,
+ }
+ return cache
+}
+
+func getLayersCache(store storage.Store) (*layersCache, error) {
+ c := getLayersCacheRef(store)
+
+ if err := c.load(); err != nil {
+ c.release()
+ return nil, err
+ }
+ return c, nil
+}
+
+// loadLayerBigData attempts to load the specified cacheKey from a file and mmap its content.
+// If the cache is not backed by a file, then it loads the entire content in memory.
+// Returns the cache content, and if mmap'ed, the mmap buffer to Munmap.
+func (c *layersCache) loadLayerBigData(layerID, bigDataKey string) ([]byte, []byte, error) {
+ inputFile, err := c.store.LayerBigData(layerID, bigDataKey)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer inputFile.Close()
+
+ // if the cache is backed by a file, attempt to mmap it.
+ if osFile, ok := inputFile.(*os.File); ok {
+ st, err := osFile.Stat()
+ if err != nil {
+ logrus.Warningf("Error stat'ing cache file for layer %q: %v", layerID, err)
+ goto fallback
+ }
+ size := st.Size()
+ if size == 0 {
+ logrus.Warningf("Cache file size is zero for layer %q: %v", layerID, err)
+ goto fallback
+ }
+ buf, err := unix.Mmap(int(osFile.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
+ if err != nil {
+ logrus.Warningf("Error mmap'ing cache file for layer %q: %v", layerID, err)
+ goto fallback
+ }
+ // best effort advise to the kernel.
+ _ = unix.Madvise(buf, unix.MADV_RANDOM)
+
+ return buf, buf, nil
+ }
+fallback:
+ buf, err := io.ReadAll(inputFile)
+ return buf, nil, err
+}
+
+func makeBinaryDigest(stringDigest string) ([]byte, error) {
+ d, err := digest.Parse(stringDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestBytes, err := hex.DecodeString(d.Encoded())
+ if err != nil {
+ return nil, err
+ }
+ algo := []byte(d.Algorithm())
+ buf := make([]byte, 0, len(algo)+1+len(digestBytes))
+ buf = append(buf, algo...)
+ buf = append(buf, ':')
+ buf = append(buf, digestBytes...)
+ return buf, nil
+}
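
// Illustrative aside (not part of the vendored file): for a sha256 digest,
// makeBinaryDigest returns []byte("sha256:") followed by the 32 raw digest
// bytes, so binary digests sort and compare byte-wise (as findBinaryTag does
// below) without re-parsing the string form.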
+
+// loadLayerCache attempts to load the cache file for the specified layer.
+// If the cache file is not present or it is using a different cache file version, then
+// the function returns (nil, nil).
+func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
+ buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ // there is no existing cache to load
+ if err != nil || buffer == nil {
+ return nil, nil
+ }
+ defer func() {
+ if errRet != nil && mmapBuffer != nil {
+ if err := unix.Munmap(mmapBuffer); err != nil {
+ logrus.Warnf("Error Munmap: layer %q: %v", layerID, err)
+ }
+ }
+ }()
+ cacheFile, err := readCacheFileFromMemory(buffer)
+ if err != nil {
+ return nil, err
+ }
+ if cacheFile == nil {
+ return nil, nil
+ }
+ return c.createLayer(layerID, cacheFile, mmapBuffer)
+}
+
+// createCacheFileFromTOC attempts to create a cache file for the specified layer.
+// If a TOC is not available, the cache won't be created and nil is returned.
+func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) {
+ clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ var lcd chunkedLayerData
+ if err == nil && clFile != nil {
+ defer clFile.Close()
+ cl, err := io.ReadAll(clFile)
+ if err != nil {
+ return nil, fmt.Errorf("open manifest file: %w", err)
+ }
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+ if err := json.Unmarshal(cl, &lcd); err != nil {
+ return nil, err
+ }
+ }
+ manifestReader, err := c.store.LayerBigData(layerID, bigDataKey)
+ if err != nil {
+ // the cache file is not needed since there is no manifest file.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ defer manifestReader.Close()
+
+ manifest, err := io.ReadAll(manifestReader)
+ if err != nil {
+ return nil, fmt.Errorf("read manifest file: %w", err)
+ }
+
+ cacheFile, err := writeCache(manifest, lcd.Format, layerID, c.store)
+ if err != nil {
+ return nil, err
+ }
+ l, err := c.createLayer(layerID, cacheFile, nil)
+ if err != nil {
+ return nil, err
+ }
+ l.reloadWithMmap = true
+ return l, nil
+}
+
+func (c *layersCache) load() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ loadedLayers := make(map[string]*layer)
+ for _, r := range c.layers {
+ loadedLayers[r.id] = r
+ }
+ allLayers, err := c.store.Layers()
+ if err != nil {
+ return err
+ }
+
+ var newLayers []*layer
+ for _, r := range allLayers {
+ // The layer is present in the store and it is already loaded. Attempt to
+ // reuse it if mmap'ed.
+ if l, found := loadedLayers[r.ID]; found {
+ // If the layer is not marked for re-load, move it to newLayers.
+ if !l.reloadWithMmap {
+ delete(loadedLayers, r.ID)
+ newLayers = append(newLayers, l)
+ continue
+ }
+ }
+ // try to read the existing cache file.
+ l, err := c.loadLayerCache(r.ID)
+ if err != nil {
+ logrus.Infof("Error loading cache file for layer %q: %v", r.ID, err)
+ }
+ if l != nil {
+ newLayers = append(newLayers, l)
+ continue
+ }
+
+ if r.ReadOnly {
+ // If the layer is coming from a read-only store, do not attempt
+ // to write to it.
+ // Therefore, we won’t find any matches in read-only-store layers,
+ // unless the read-only store layer comes prepopulated with cacheKey data.
+ continue
+ }
+
+ // the cache file is either not present or broken. Try to generate it from the TOC.
+ l, err = c.createCacheFileFromTOC(r.ID)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err)
+ }
+ if l != nil {
+ newLayers = append(newLayers, l)
+ }
+ }
+ // The layers that are still in loadedLayers are either stale or fully loaded in memory. Clean them up.
+ for _, l := range loadedLayers {
+ l.release()
+ }
+ c.layers = newLayers
+ return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *fileMetadata) (string, error) {
+ digester := digest.Canonical.Digester()
+
+ modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+ hash := digester.Hash()
+
+ if _, err := hash.Write([]byte(f.Digest)); err != nil {
+ return "", err
+ }
+
+ if _, err := hash.Write([]byte(modeString)); err != nil {
+ return "", err
+ }
+
+ if len(f.Xattrs) > 0 {
+ keys := make([]string, 0, len(f.Xattrs))
+ for k := range f.Xattrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if _, err := hash.Write([]byte(k)); err != nil {
+ return "", err
+ }
+ if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+ return "", err
+ }
+ }
+ }
+ return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $PATH_POS$OFFSET$LEN
+func generateFileLocation(pathPos int, offset, len uint64) []byte {
+ var buf []byte
+
+ buf = binary.AppendUvarint(buf, uint64(pathPos))
+ buf = binary.AppendUvarint(buf, offset)
+ buf = binary.AppendUvarint(buf, len)
+
+ return buf
+}
+
+// parseFileLocation reads what was written by generateFileLocation.
+func parseFileLocation(locationData []byte) (int, uint64, uint64, error) {
+ reader := bytes.NewReader(locationData)
+
+ pathPos, err := binary.ReadUvarint(reader)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ offset, err := binary.ReadUvarint(reader)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ len, err := binary.ReadUvarint(reader)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ return int(pathPos), offset, len, nil
+}
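
// Illustrative aside (not part of the vendored file): the two helpers above
// round-trip a (pathPos, offset, len) triple through unsigned-varint encoding:
//
//	loc := generateFileLocation(7, 512, 1024)
//	pathPos, off, length, err := parseFileLocation(loc)
//	// pathPos == 7, off == 512, length == 1024, err == nil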
+
+// appendTag appends the $OFFSET$LEN information to the provided $DIGEST.
+// The [OFFSET; LEN] points to the variable length data where the file locations
+// are stored. $DIGEST has length digestLen stored in the cache file header.
+func appendTag(digest []byte, offset, len uint64) ([]byte, error) {
+ digest = binary.LittleEndian.AppendUint64(digest, offset)
+ digest = binary.LittleEndian.AppendUint64(digest, len)
+ return digest, nil
+}
+
+type setBigData interface {
+ // SetLayerBigData stores a (possibly large) chunk of named data
+ SetLayerBigData(id, key string, data io.Reader) error
+}
+
+func bloomFilterFromTags(tags [][]byte, digestLen int) *bloomFilter {
+ bloomFilter := newBloomFilter(len(tags)*bloomFilterScale, bloomFilterHashes)
+ for _, t := range tags {
+ bloomFilter.add(t[:digestLen])
+ }
+ return bloomFilter
+}
+
+func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][]byte, tagLen, digestLen int, vdata, fnames bytes.Buffer, tagsBuffer *bytes.Buffer) error {
+ sort.Slice(tags, func(i, j int) bool {
+ return bytes.Compare(tags[i], tags[j]) == -1
+ })
+ for _, t := range tags {
+ if _, err := tagsBuffer.Write(t); err != nil {
+ return err
+ }
+ }
+
+ // version
+ if err := binary.Write(writer, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+ return err
+ }
+
+ // len of a tag
+ if err := binary.Write(writer, binary.LittleEndian, uint64(tagLen)); err != nil {
+ return err
+ }
+
+ // len of a digest
+ if err := binary.Write(writer, binary.LittleEndian, uint64(digestLen)); err != nil {
+ return err
+ }
+
+ // bloom filter
+ if err := bloomFilter.writeTo(writer); err != nil {
+ return err
+ }
+
+ // tags length
+ if err := binary.Write(writer, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+ return err
+ }
+
+ // vdata length
+ if err := binary.Write(writer, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+ return err
+ }
+
+ // fnames length
+ if err := binary.Write(writer, binary.LittleEndian, uint64(fnames.Len())); err != nil {
+ return err
+ }
+
+ // tags
+ if _, err := writer.Write(tagsBuffer.Bytes()); err != nil {
+ return err
+ }
+
+ // variable length data
+ if _, err := writer.Write(vdata.Bytes()); err != nil {
+ return err
+ }
+
+ // file names
+ if _, err := writer.Write(fnames.Bytes()); err != nil {
+ return err
+ }
+
+ return nil
+}
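
// Illustrative aside (not part of the vendored file): the cache file layout
// produced above, which readCacheFileFromMemory below reads back, is
//
//	u64 version | u64 tagLen | u64 digestLen | bloom filter |
//	u64 len(tags) | u64 len(vdata) | u64 len(fnames) |
//	sorted tags | vdata | fnames
//
// with every integer encoded little-endian.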
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests, each stored with the offset and length of its
+// file-location record in the variable-length data section.
+// The same cache is used to look up files, chunks and candidates for deduplication with hard links.
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) {
+ var vdata, tagsBuffer, fnames bytes.Buffer
+ tagLen := 0
+ digestLen := 0
+
+ toc, err := prepareCacheFile(manifest, format)
+ if err != nil {
+ return nil, err
+ }
+
+ fnamesMap := make(map[string]int)
+ getFileNamePosition := func(name string) (int, error) {
+ if pos, found := fnamesMap[name]; found {
+ return pos, nil
+ }
+ pos := fnames.Len()
+ fnamesMap[name] = pos
+
+ if err := binary.Write(&fnames, binary.LittleEndian, uint32(len(name))); err != nil {
+ return 0, err
+ }
+ if _, err := fnames.WriteString(name); err != nil {
+ return 0, err
+ }
+ return pos, nil
+ }
+
+ var tags [][]byte
+ for _, k := range toc {
+ if k.Digest != "" {
+ digest, err := makeBinaryDigest(k.Digest)
+ if err != nil {
+ return nil, err
+ }
+ fileNamePos, err := getFileNamePosition(k.Name)
+ if err != nil {
+ return nil, err
+ }
+ location := generateFileLocation(fileNamePos, 0, uint64(k.Size))
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+
+ tag, err := appendTag(digest, off, l)
+ if err != nil {
+ return nil, err
+ }
+ if tagLen == 0 {
+ tagLen = len(tag)
+ }
+ if tagLen != len(tag) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, tag)
+
+ fp, err := calculateHardLinkFingerprint(k)
+ if err != nil {
+ return nil, err
+ }
+ digestHardLink, err := makeBinaryDigest(fp)
+ if err != nil {
+ return nil, err
+ }
+ tag, err = appendTag(digestHardLink, off, l)
+ if err != nil {
+ return nil, err
+ }
+ if tagLen != len(tag) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, tag)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+ digestLen = len(digestHardLink)
+ }
+ if k.ChunkDigest != "" {
+ fileNamePos, err := getFileNamePosition(k.Name)
+ if err != nil {
+ return nil, err
+ }
+ location := generateFileLocation(fileNamePos, uint64(k.ChunkOffset), uint64(k.ChunkSize))
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+
+ digest, err := makeBinaryDigest(k.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ d, err := appendTag(digest, off, l)
+ if err != nil {
+ return nil, err
+ }
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+ digestLen = len(digest)
+ }
+ }
+
+ bloomFilter := bloomFilterFromTags(tags, digestLen)
+
+ pipeReader, pipeWriter := io.Pipe()
+ errChan := make(chan error, 1)
+ go func() {
+ defer pipeWriter.Close()
+ defer close(errChan)
+
+ errChan <- writeCacheFileToWriter(pipeWriter, bloomFilter, tags, tagLen, digestLen, vdata, fnames, &tagsBuffer)
+ }()
+ defer pipeReader.Close()
+
+ counter := ioutils.NewWriteCounter(io.Discard)
+
+ r := io.TeeReader(pipeReader, counter)
+
+ if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+ return nil, err
+ }
+
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+ return &cacheFile{
+ digestLen: digestLen,
+ tagLen: tagLen,
+ tags: tagsBuffer.Bytes(),
+ vdata: vdata.Bytes(),
+ fnames: fnames.Bytes(),
+ fnamesLen: len(fnames.Bytes()),
+ bloomFilter: bloomFilter,
+ }, nil
+}
+
+// readCacheFileFromMemory reads a cache file from a buffer.
+// It can return (nil, nil) if the cache file uses a different file version than the one currently supported.
+func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
+ bigData := bytes.NewReader(bigDataBuffer)
+
+ var version, tagLen, digestLen, tagsLen, fnamesLen, vdataLen uint64
+ if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
+ return nil, err
+ }
+ if version != cacheVersion {
+ return nil, nil //nolint: nilnil
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
+ return nil, err
+ }
+
+ bloomFilter, err := readBloomFilter(bigData)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
+ return nil, err
+ }
+
+ if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil {
+ return nil, err
+ }
+
+ if tagsLen > maxTagsLen {
+ return nil, fmt.Errorf("tags len %d exceeds the maximum allowed size %d", tagsLen, maxTagsLen)
+ }
+ if digestLen > tagLen {
+ return nil, fmt.Errorf("digest len %d exceeds the tag len %d", digestLen, tagLen)
+ }
+
+ tags := make([]byte, tagsLen)
+ if _, err := bigData.Read(tags); err != nil {
+ return nil, err
+ }
+
+ // retrieve the unread part of the buffer.
+ remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():]
+
+ if vdataLen >= uint64(len(remaining)) {
+ return nil, fmt.Errorf("vdata len %d exceeds the remaining buffer size %d", vdataLen, len(remaining))
+ }
+
+ vdata := remaining[:vdataLen]
+ fnames := remaining[vdataLen:]
+
+ return &cacheFile{
+ bloomFilter: bloomFilter,
+ digestLen: int(digestLen),
+ fnames: fnames,
+ fnamesLen: int(fnamesLen),
+ tagLen: int(tagLen),
+ tags: tags,
+ vdata: vdata,
+ }, nil
+}
+
+func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([]*fileMetadata, error) {
+ toc, err := unmarshalToc(manifest)
+ if err != nil {
+ // ignore errors here. They might be caused by a different manifest format.
+ logrus.Debugf("could not unmarshal manifest: %v", err)
+ return nil, nil //nolint: nilnil
+ }
+
+ var entries []fileMetadata
+ for i := range toc.Entries {
+ entries = append(entries, fileMetadata{
+ FileMetadata: toc.Entries[i],
+ })
+ }
+
+ switch format {
+ case graphdriver.DifferOutputFormatDir:
+ case graphdriver.DifferOutputFormatFlat:
+ entries, err = makeEntriesFlat(entries)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unknown format %q", format)
+ }
+
+ var r []*fileMetadata
+ chunkSeen := make(map[string]bool)
+ for i := range entries {
+ d := entries[i].Digest
+ if d != "" {
+ r = append(r, &entries[i])
+ continue
+ }
+
+ // chunks do not use hard link dedup so keeping just one candidate is enough
+ cd := toc.Entries[i].ChunkDigest
+ if cd != "" && !chunkSeen[cd] {
+ r = append(r, &entries[i])
+ chunkSeen[cd] = true
+ }
+ }
+
+ return r, nil
+}
+
+func (c *layersCache) createLayer(id string, cacheFile *cacheFile, mmapBuffer []byte) (*layer, error) {
+ target, err := c.store.DifferTarget(id)
+ if err != nil {
+ return nil, fmt.Errorf("get checkout directory layer %q: %w", id, err)
+ }
+ l := &layer{
+ id: id,
+ cacheFile: cacheFile,
+ target: target,
+ mmapBuffer: mmapBuffer,
+ }
+ if mmapBuffer != nil {
+ runtime.SetFinalizer(l, layerFinalizer)
+ }
+ return l, nil
+}
+
+func findBinaryTag(binaryDigest []byte, cacheFile *cacheFile) (bool, uint64, uint64) {
+ nElements := len(cacheFile.tags) / cacheFile.tagLen
+
+ i := sort.Search(nElements, func(i int) bool {
+ d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]
+ return bytes.Compare(d, binaryDigest) >= 0
+ })
+ if i < nElements {
+ d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]
+ if bytes.Equal(binaryDigest, d) {
+ startOff := i*cacheFile.tagLen + cacheFile.digestLen
+
+ // check for corrupted data, there must be 2 u64 (off and len) after the digest.
+ if cacheFile.tagLen < cacheFile.digestLen+16 {
+ return false, 0, 0
+ }
+
+ offsetAndLen := cacheFile.tags[startOff : (i+1)*cacheFile.tagLen]
+
+ off := binary.LittleEndian.Uint64(offsetAndLen[:8])
+ len := binary.LittleEndian.Uint64(offsetAndLen[8:16])
+
+ return true, off, len
+ }
+ }
+ return false, 0, 0
+}
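
// Illustrative aside (not part of the vendored file): the same fixed-width
// record search on a toy table of 3-byte keys with a 1-byte payload:
//
//	recs := []byte("aaa1bbb2ccc3") // recLen = 4, keyLen = 3
//	i := sort.Search(3, func(i int) bool {
//		return bytes.Compare(recs[i*4:i*4+3], []byte("bbb")) >= 0
//	})
//	// i == 1, and recs[i*4+3] == '2' is the payload for "bbb"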
+
+func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
+ if digest == "" {
+ return "", "", -1, nil
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ binaryDigest, err := makeBinaryDigest(digest)
+ if err != nil {
+ return "", "", 0, err
+ }
+
+ for _, layer := range c.layers {
+ if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
+ continue
+ }
+ found, off, tagLen := findBinaryTag(binaryDigest, layer.cacheFile)
+ if found {
+ if uint64(len(layer.cacheFile.vdata)) < off+tagLen {
+ return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+ }
+ fileLocationData := layer.cacheFile.vdata[off : off+tagLen]
+
+ fnamePosition, offFile, _, err := parseFileLocation(fileLocationData)
+ if err != nil {
+ return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+ }
+
+ if len(layer.cacheFile.fnames) < fnamePosition+4 {
+ return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+ }
+ lenPath := int(binary.LittleEndian.Uint32(layer.cacheFile.fnames[fnamePosition : fnamePosition+4]))
+
+ if len(layer.cacheFile.fnames) < fnamePosition+lenPath+4 {
+ return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+ }
+ path := string(layer.cacheFile.fnames[fnamePosition+4 : fnamePosition+lenPath+4])
+
+ // The chunk length stored in the file location data is currently unused.
+ return layer.target, path, int64(offFile), nil
+ }
+ }
+
+ return "", "", -1, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// file is the file to look for.
+func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks bool) (string, string, error) {
+ digest := file.Digest
+ if useHardLinks {
+ var err error
+ digest, err = calculateHardLinkFingerprint(file)
+ if err != nil {
+ return "", "", err
+ }
+ }
+ target, name, off, err := c.findDigestInternal(digest)
+ if off == 0 {
+ return target, name, err
+ }
+ return "", "", nil
+}
+
+func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+ return c.findDigestInternal(chunk.ChunkDigest)
+}
+
+func unmarshalToc(manifest []byte) (*internal.TOC, error) {
+ var toc internal.TOC
+
+ iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch strings.ToLower(field) {
+ case "version":
+ toc.Version = iter.ReadInt()
+
+ case "entries":
+ for iter.ReadArray() {
+ var m internal.FileMetadata
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch strings.ToLower(field) {
+ case "type":
+ m.Type = iter.ReadString()
+ case "name":
+ m.Name = iter.ReadString()
+ case "linkname":
+ m.Linkname = iter.ReadString()
+ case "mode":
+ m.Mode = iter.ReadInt64()
+ case "size":
+ m.Size = iter.ReadInt64()
+ case "uid":
+ m.UID = iter.ReadInt()
+ case "gid":
+ m.GID = iter.ReadInt()
+ case "modtime":
+ time, err := time.Parse(time.RFC3339, iter.ReadString())
+ if err != nil {
+ return nil, err
+ }
+ m.ModTime = &time
+ case "accesstime":
+ time, err := time.Parse(time.RFC3339, iter.ReadString())
+ if err != nil {
+ return nil, err
+ }
+ m.AccessTime = &time
+ case "changetime":
+ time, err := time.Parse(time.RFC3339, iter.ReadString())
+ if err != nil {
+ return nil, err
+ }
+ m.ChangeTime = &time
+ case "devmajor":
+ m.Devmajor = iter.ReadInt64()
+ case "devminor":
+ m.Devminor = iter.ReadInt64()
+ case "digest":
+ m.Digest = iter.ReadString()
+ case "offset":
+ m.Offset = iter.ReadInt64()
+ case "endoffset":
+ m.EndOffset = iter.ReadInt64()
+ case "chunksize":
+ m.ChunkSize = iter.ReadInt64()
+ case "chunkoffset":
+ m.ChunkOffset = iter.ReadInt64()
+ case "chunkdigest":
+ m.ChunkDigest = iter.ReadString()
+ case "chunktype":
+ m.ChunkType = iter.ReadString()
+ case "xattrs":
+ m.Xattrs = make(map[string]string)
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ m.Xattrs[key] = iter.ReadString()
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
+ m.Digest = digestSha256Empty
+ }
+ toc.Entries = append(toc.Entries, m)
+ }
+
+ case "tarsplitdigest": // strings.ToLower("tarSplitDigest")
+ s := iter.ReadString()
+ d, err := digest.Parse(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid tarSplitDigest %q: %w", s, err)
+ }
+ toc.TarSplitDigest = d
+
+ default:
+ iter.Skip()
+ }
+ }
+
+ // Validate that there is no extra data after the TOC document. This is a
+ // security measure: the digest we validate covers the entire input, so the
+ // parsed TOC must account for all of it, with no unparsed trailing data.
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil, iter.Error
+ }
+ if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) {
+ return nil, fmt.Errorf("unexpected data after manifest")
+ }
+
+ return &toc, nil
+}
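+
+// For illustration, a minimal TOC document that unmarshalToc accepts
+// (hypothetical entry; the digest value is elided):
+//
+//	{
+//	  "version": 1,
+//	  "entries": [
+//	    {"type": "reg", "name": "etc/passwd", "size": 42, "digest": "sha256:..."}
+//	  ]
+//	}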
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
new file mode 100644
index 000000000..e828d479f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -0,0 +1,25 @@
+package chunked
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/containers/storage/pkg/chunked/internal"
+)
+
+const (
+ TypeReg = internal.TypeReg
+ TypeChunk = internal.TypeChunk
+ TypeLink = internal.TypeLink
+ TypeChar = internal.TypeChar
+ TypeBlock = internal.TypeBlock
+ TypeDir = internal.TypeDir
+ TypeFifo = internal.TypeFifo
+ TypeSymlink = internal.TypeSymlink
+)
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ return compressor.ZstdCompressor(r, metadata, level)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
new file mode 100644
index 000000000..2dac46354
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
@@ -0,0 +1,428 @@
+package chunked
+
+import (
+ archivetar "archive/tar"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "strconv"
+ "time"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/klauspost/compress/zstd"
+ "github.com/klauspost/pgzip"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+ expMaps "golang.org/x/exp/maps"
+)
+
+var typesToTar = map[string]byte{
+ TypeReg: tar.TypeReg,
+ TypeLink: tar.TypeLink,
+ TypeChar: tar.TypeChar,
+ TypeBlock: tar.TypeBlock,
+ TypeDir: tar.TypeDir,
+ TypeFifo: tar.TypeFifo,
+ TypeSymlink: tar.TypeSymlink,
+}
+
+func typeToTarType(t string) (byte, error) {
+ r, found := typesToTar[t]
+ if !found {
+ return 0, fmt.Errorf("unknown type: %v", t)
+ }
+ return r, nil
+}
+
+func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
+ // Information on the format is available at https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
+ footerSize := int64(51)
+ if blobSize <= footerSize {
+ return nil, 0, errors.New("blob too small")
+ }
+ chunk := ImageSourceChunk{
+ Offset: uint64(blobSize - footerSize),
+ Length: uint64(footerSize),
+ }
+ parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+ if err != nil {
+ return nil, 0, err
+ }
+ var reader io.ReadCloser
+ select {
+ case r := <-parts:
+ reader = r
+ case err := <-errs:
+ return nil, 0, err
+ }
+ defer reader.Close()
+ footer := make([]byte, footerSize)
+ if _, err := io.ReadFull(reader, footer); err != nil {
+ return nil, 0, err
+ }
+
+ /* Read the ToC offset:
+ - 10 bytes gzip header
+ - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ - 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ - 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
+ - 8 bytes gzip footer
+ */
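+ // As a worked example of the layout above: 10 + 2 + 2 + 2 + 22 + 5 + 8 = 51,
+ // matching footerSize, and the 16 hex digits of the ToC offset start at
+ // byte 10+2+2+2 = 16, so the slice below reads footer[16:32] (the 22-6
+ // drops the trailing "STARGZ" suffix of the 22-byte subfield).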
+ tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
+ if err != nil {
+ return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
+ }
+
+ size := int64(blobSize - footerSize - tocOffset)
+ // set a reasonable limit
+ if size > (1<<20)*50 {
+ return nil, 0, errors.New("manifest too big")
+ }
+
+ chunk = ImageSourceChunk{
+ Offset: uint64(tocOffset),
+ Length: uint64(size),
+ }
+ parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+ if err != nil {
+ return nil, 0, err
+ }
+
+ var tocReader io.ReadCloser
+ select {
+ case r := <-parts:
+ tocReader = r
+ case err := <-errs:
+ return nil, 0, err
+ }
+ defer tocReader.Close()
+
+ r, err := pgzip.NewReader(tocReader)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer r.Close()
+
+ aTar := archivetar.NewReader(r)
+
+ header, err := aTar.Next()
+ if err != nil {
+ return nil, 0, err
+ }
+ // set a reasonable limit
+ if header.Size > (1<<20)*50 {
+ return nil, 0, errors.New("manifest too big")
+ }
+
+ manifestUncompressed := make([]byte, header.Size)
+ if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
+ return nil, 0, err
+ }
+
+ manifestDigester := digest.Canonical.Digester()
+ manifestChecksum := manifestDigester.Hash()
+ if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
+ return nil, 0, err
+ }
+
+ if manifestDigester.Digest() != tocDigest {
+ return nil, 0, errors.New("invalid manifest checksum")
+ }
+
+ return manifestUncompressed, tocOffset, nil
+}
+
+// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
+// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
+ offsetMetadata := annotations[internal.ManifestInfoKey]
+ if offsetMetadata == "" {
+ return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
+ }
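+ // For illustration (hypothetical values): an annotation of "1024:512:2048:1"
+ // means the compressed TOC starts at blob offset 1024, is 512 bytes long,
+ // decompresses to 2048 bytes, and has manifest type 1 (ManifestTypeCRFS).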
+ var manifestChunk ImageSourceChunk
+ var manifestLengthUncompressed, manifestType uint64
+ if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil {
+ return nil, nil, nil, 0, err
+ }
+ // The tarSplit… values are valid if tarSplitChunk.Offset > 0
+ var tarSplitChunk ImageSourceChunk
+ var tarSplitLengthUncompressed uint64
+ if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+ if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
+ return nil, nil, nil, 0, err
+ }
+ }
+
+ if manifestType != internal.ManifestTypeCRFS {
+ return nil, nil, nil, 0, errors.New("invalid manifest type")
+ }
+
+ // set a reasonable limit
+ if manifestChunk.Length > (1<<20)*50 {
+ return nil, nil, nil, 0, errors.New("manifest too big")
+ }
+ if manifestLengthUncompressed > (1<<20)*50 {
+ return nil, nil, nil, 0, errors.New("manifest too big")
+ }
+
+ chunks := []ImageSourceChunk{manifestChunk}
+ if tarSplitChunk.Offset > 0 {
+ chunks = append(chunks, tarSplitChunk)
+ }
+ parts, errs, err := blobStream.GetBlobAt(chunks)
+ if err != nil {
+ return nil, nil, nil, 0, err
+ }
+
+ readBlob := func(len uint64) ([]byte, error) {
+ var reader io.ReadCloser
+ select {
+ case r := <-parts:
+ reader = r
+ case err := <-errs:
+ return nil, err
+ }
+
+ blob := make([]byte, len)
+ if _, err := io.ReadFull(reader, blob); err != nil {
+ reader.Close()
+ return nil, err
+ }
+ if err := reader.Close(); err != nil {
+ return nil, err
+ }
+ return blob, nil
+ }
+
+ manifest, err := readBlob(manifestChunk.Length)
+ if err != nil {
+ return nil, nil, nil, 0, err
+ }
+
+ decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String())
+ if err != nil {
+ return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err)
+ }
+ toc, err := unmarshalToc(decodedBlob)
+ if err != nil {
+ return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
+ }
+
+ var decodedTarSplit []byte
+ if toc.TarSplitDigest != "" {
+ if tarSplitChunk.Offset <= 0 {
+ return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
+ }
+ tarSplit, err := readBlob(tarSplitChunk.Length)
+ if err != nil {
+ return nil, nil, nil, 0, err
+ }
+ decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
+ if err != nil {
+ return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
+ }
+ // We use the TOC for creating on-disk files, but the tar-split for creating metadata
+ // when exporting the layer contents. Ensure the two match, otherwise local inspection of a container
+ // might be misleading about the exported contents.
+ if err := ensureTOCMatchesTarSplit(toc, decodedTarSplit); err != nil {
+ return nil, nil, nil, 0, fmt.Errorf("tar-split and TOC data is inconsistent: %w", err)
+ }
+ } else if tarSplitChunk.Offset > 0 {
+ // We must ignore the tar-split when the digest is not present in the TOC, because we can’t authenticate it.
+ //
+ // But if we asked for the chunk, now we must consume the data to not block the producer.
+ // Ideally the GetBlobAt API should be changed so that this is not necessary.
+ _, err := readBlob(tarSplitChunk.Length)
+ if err != nil {
+ return nil, nil, nil, 0, err
+ }
+ }
+ return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
+}
+
+// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
+func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
+ pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
+ for i := range toc.Entries {
+ e := &toc.Entries[i]
+ if e.Type != internal.TypeChunk {
+ if _, ok := pendingFiles[e.Name]; ok {
+ return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
+ }
+ pendingFiles[e.Name] = e
+ }
+ }
+
+ unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+ if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
+ e, ok := pendingFiles[hdr.Name]
+ if !ok {
+ return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
+ }
+ delete(pendingFiles, hdr.Name)
+ expected, err := internal.NewFileMetadata(hdr)
+ if err != nil {
+ return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
+ }
+ if err := ensureFileMetadataAttributesMatch(e, &expected); err != nil {
+ return fmt.Errorf("TOC and tar-split metadata doesn’t match: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+ if len(pendingFiles) != 0 {
+ remaining := expMaps.Keys(pendingFiles)
+ if len(remaining) > 5 {
+ remaining = remaining[:5] // Just to limit the size of the output.
+ }
+ return fmt.Errorf("TOC contains entries not present in tar-split, incl. %q", remaining)
+ }
+ return nil
+}
+
+// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
+func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
+ var res int64 = 0
+
+ unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+ for {
+ entry, err := unpacker.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return -1, fmt.Errorf("reading tar-split entries: %w", err)
+ }
+ switch entry.Type {
+ case storage.SegmentType:
+ res += int64(len(entry.Payload))
+ case storage.FileType:
+ // entry.Size is the “logical size”, which might not be the physical size for sparse entries;
+ // but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents,
+ // sparse files are not supported.
+ // Also https://github.com/opencontainers/image-spec/blob/main/layer.md says
+ // > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations.
+ res += entry.Size
+ default:
+ return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type)
+ }
+ }
+ return res, nil
+}
+
+// ensureTimePointersMatch ensures that a and b are equal
+func ensureTimePointersMatch(a, b *time.Time) error {
+ // We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
+ // The archive/tar code turns time.IsZero() timestamps into a Unix timestamp of 0 when writing, but turns a Unix timestamp of 0
+ // when reading into a (local-timezone) Jan 1 1970, which is not IsZero(). So, treat that the same as IsZero as well.
+ unixZero := time.Unix(0, 0)
+ if a != nil && (a.IsZero() || a.Equal(unixZero)) {
+ a = nil
+ }
+ if b != nil && (b.IsZero() || b.Equal(unixZero)) {
+ b = nil
+ }
+ switch {
+ case a == nil && b == nil:
+ return nil
+ case a == nil:
+ return fmt.Errorf("nil != %v", *b)
+ case b == nil:
+ return fmt.Errorf("%v != nil", *a)
+ default:
+ if a.Equal(*b) {
+ return nil
+ }
+ return fmt.Errorf("%v != %v", *a, *b)
+ }
+}
+
+// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
+// in the tar stream or matching contents)
+func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
+ // Keep this in sync with internal.FileMetadata!
+
+ if a.Type != b.Type {
+ return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
+ }
+ if a.Name != b.Name {
+ return fmt.Errorf("mismatch of Name: %q != %q", a.Name, b.Name)
+ }
+ if a.Linkname != b.Linkname {
+ return fmt.Errorf("mismatch of Linkname: %q != %q", a.Linkname, b.Linkname)
+ }
+ if a.Mode != b.Mode {
+ return fmt.Errorf("mismatch of Mode: %q != %q", a.Mode, b.Mode)
+ }
+ if a.Size != b.Size {
+ return fmt.Errorf("mismatch of Size: %q != %q", a.Size, b.Size)
+ }
+ if a.UID != b.UID {
+ return fmt.Errorf("mismatch of UID: %q != %q", a.UID, b.UID)
+ }
+ if a.GID != b.GID {
+ return fmt.Errorf("mismatch of GID: %q != %q", a.GID, b.GID)
+ }
+
+ if err := ensureTimePointersMatch(a.ModTime, b.ModTime); err != nil {
+ return fmt.Errorf("mismatch of ModTime: %w", err)
+ }
+ if err := ensureTimePointersMatch(a.AccessTime, b.AccessTime); err != nil {
+ return fmt.Errorf("mismatch of AccessTime: %w", err)
+ }
+ if err := ensureTimePointersMatch(a.ChangeTime, b.ChangeTime); err != nil {
+ return fmt.Errorf("mismatch of ChangeTime: %w", err)
+ }
+ if a.Devmajor != b.Devmajor {
+ return fmt.Errorf("mismatch of Devmajor: %q != %q", a.Devmajor, b.Devmajor)
+ }
+ if a.Devminor != b.Devminor {
+ return fmt.Errorf("mismatch of Devminor: %q != %q", a.Devminor, b.Devminor)
+ }
+ if !maps.Equal(a.Xattrs, b.Xattrs) {
+ return fmt.Errorf("mismatch of Xattrs: %q != %q", a.Xattrs, b.Xattrs)
+ }
+
+ // Digest is not compared
+ // Offset is not compared
+ // EndOffset is not compared
+
+ // ChunkSize is not compared
+ // ChunkOffset is not compared
+ // ChunkDigest is not compared
+ // ChunkType is not compared
+ return nil
+}
+
+func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
+ d, err := digest.Parse(expectedCompressedChecksum)
+ if err != nil {
+ return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
+ }
+
+ blobDigester := d.Algorithm().Digester()
+ blobChecksum := blobDigester.Hash()
+ if _, err := blobChecksum.Write(blob); err != nil {
+ return nil, err
+ }
+ if blobDigester.Digest() != d {
+ return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
+ }
+
+ decoder, err := zstd.NewReader(nil) //nolint:contextcheck
+ if err != nil {
+ return nil, err
+ }
+ defer decoder.Close()
+
+ b := make([]byte, 0, lengthUncompressed)
+ return decoder.DecodeAll(blob, b)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 000000000..654969749
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,500 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+ RollsumBits = 16
+ holesThreshold = int64(1 << 10)
+)
+
+type holesFinder struct {
+ reader *bufio.Reader
+ zeros int64
+ threshold int64
+
+ state int
+}
+
+const (
+ holesFinderStateRead = iota
+ holesFinderStateAccumulate
+ holesFinderStateFound
+ holesFinderStateEOF
+)
+
+// readByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.THRESHOLD consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
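+// For example (illustrative): with threshold 1024, the stream 'a', then 2000
+// zero bytes, then 'b' yields (0, 'a', nil), a single (2000, 0, nil) that
+// reports the hole, and finally (0, 'b', nil).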
+func (f *holesFinder) readByte() (int64, byte, error) {
+ for {
+ switch f.state {
+ // reading the file stream
+ case holesFinderStateRead:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ return 0, b, err
+ }
+
+ if b != 0 {
+ return 0, b, err
+ }
+
+ f.zeros = 1
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ } else {
+ f.state = holesFinderStateAccumulate
+ }
+ // accumulating zeros, but still didn't reach the threshold
+ case holesFinderStateAccumulate:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ continue
+ }
+ return 0, b, err
+ }
+
+ if b == 0 {
+ f.zeros++
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ }
+ } else {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+ }
+ // found a hole. Number of zeros >= threshold
+ case holesFinderStateFound:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ }
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ if b != 0 {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ f.zeros++
+ // reached EOF. Flush pending zeros if any.
+ case holesFinderStateEOF:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ return 0, 0, io.EOF
+ }
+ }
+}
+
+type rollingChecksumReader struct {
+ reader *holesFinder
+ closed bool
+ rollsum *RollSum
+ pendingHole int64
+
+ // WrittenOut is the total number of bytes read from
+ // the stream.
+ WrittenOut int64
+
+ // IsLastChunkZeros tells whether the last generated
+ // chunk is a hole (made of consecutive zeros). If it
+ // is false, then the last chunk is a data chunk
+ // generated by the rolling checksum.
+ IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+ rc.IsLastChunkZeros = false
+
+ if rc.pendingHole > 0 {
+ toCopy := int64(len(b))
+ if rc.pendingHole < toCopy {
+ toCopy = rc.pendingHole
+ }
+ rc.pendingHole -= toCopy
+ for i := int64(0); i < toCopy; i++ {
+ b[i] = 0
+ }
+
+ rc.WrittenOut += toCopy
+
+ rc.IsLastChunkZeros = true
+
+ // if there are no other zeros left, terminate the chunk
+ return rc.pendingHole == 0, int(toCopy), nil
+ }
+
+ if rc.closed {
+ return false, 0, io.EOF
+ }
+
+ for i := 0; i < len(b); i++ {
+ holeLen, n, err := rc.reader.readByte()
+ if err != nil {
+ if err == io.EOF {
+ rc.closed = true
+ if i == 0 {
+ return false, 0, err
+ }
+ return false, i, nil
+ }
+ // Report any other error type
+ return false, -1, err
+ }
+ if holeLen > 0 {
+ for j := int64(0); j < holeLen; j++ {
+ rc.rollsum.Roll(0)
+ }
+ rc.pendingHole = holeLen
+ return true, i, nil
+ }
+ b[i] = n
+ rc.WrittenOut++
+ rc.rollsum.Roll(n)
+ if rc.rollsum.OnSplitWithBits(RollsumBits) {
+ return true, i + 1, nil
+ }
+ }
+ return false, len(b), nil
+}
+
+type chunk struct {
+ ChunkOffset int64
+ Offset int64
+ Checksum string
+ ChunkSize int64
+ ChunkType string
+}
+
+type tarSplitData struct {
+ compressed *bytes.Buffer
+ digester digest.Digester
+ uncompressedCounter *ioutils.WriteCounter
+ zstd *zstd.Encoder
+ packer storage.Packer
+}
+
+func newTarSplitData(level int) (*tarSplitData, error) {
+ compressed := bytes.NewBuffer(nil)
+ digester := digest.Canonical.Digester()
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
+ if err != nil {
+ return nil, err
+ }
+
+ uncompressedCounter := ioutils.NewWriteCounter(zstdWriter)
+ metaPacker := storage.NewJSONPacker(uncompressedCounter)
+
+ return &tarSplitData{
+ compressed: compressed,
+ digester: digester,
+ uncompressedCounter: uncompressedCounter,
+ zstd: zstdWriter,
+ packer: metaPacker,
+ }, nil
+}
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+ // total written so far. Used to retrieve partial offsets in the file
+ dest := ioutils.NewWriteCounter(destFile)
+
+ tarSplitData, err := newTarSplitData(level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if tarSplitData.zstd != nil {
+ tarSplitData.zstd.Close()
+ }
+ }()
+
+ its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+ if err != nil {
+ return err
+ }
+
+ tr := tar.NewReader(its)
+ tr.RawAccounting = true
+
+ buf := make([]byte, 4096)
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if zstdWriter != nil {
+ zstdWriter.Close()
+ }
+ }()
+
+ restartCompression := func() (int64, error) {
+ var offset int64
+ if zstdWriter != nil {
+ if err := zstdWriter.Close(); err != nil {
+ return 0, err
+ }
+ offset = dest.Count
+ zstdWriter.Reset(dest)
+ }
+ return offset, nil
+ }
+
+ var metadata []internal.FileMetadata
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+
+ payloadDigester := digest.Canonical.Digester()
+ chunkDigester := digest.Canonical.Digester()
+
+ // Now handle the payload, if any
+ startOffset := int64(0)
+ lastOffset := int64(0)
+ lastChunkOffset := int64(0)
+
+ checksum := ""
+
+ chunks := []chunk{}
+
+ hf := &holesFinder{
+ threshold: holesThreshold,
+ reader: bufio.NewReader(tr),
+ }
+
+ rcReader := &rollingChecksumReader{
+ reader: hf,
+ rollsum: NewRollSum(),
+ }
+
+ payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+ for {
+ mustSplit, read, errRead := rcReader.Read(buf)
+ if errRead != nil && errRead != io.EOF {
+ return errRead
+ }
+ // restart the compression only if there is a payload.
+ if read > 0 {
+ if startOffset == 0 {
+ startOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ lastOffset = startOffset
+ }
+
+ if _, err := payloadDest.Write(buf[:read]); err != nil {
+ return err
+ }
+ }
+ if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+ off, err := restartCompression()
+ if err != nil {
+ return err
+ }
+
+ chunkSize := rcReader.WrittenOut - lastChunkOffset
+ if chunkSize > 0 {
+ chunkType := internal.ChunkTypeData
+ if rcReader.IsLastChunkZeros {
+ chunkType = internal.ChunkTypeZeros
+ }
+
+ chunks = append(chunks, chunk{
+ ChunkOffset: lastChunkOffset,
+ Offset: lastOffset,
+ Checksum: chunkDigester.Digest().String(),
+ ChunkSize: chunkSize,
+ ChunkType: chunkType,
+ })
+ }
+
+ lastOffset = off
+ lastChunkOffset = rcReader.WrittenOut
+ chunkDigester = digest.Canonical.Digester()
+ payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+ }
+ if errRead == io.EOF {
+ if startOffset > 0 {
+ checksum = payloadDigester.Digest().String()
+ }
+ break
+ }
+ }
+
+ mainEntry, err := internal.NewFileMetadata(hdr)
+ if err != nil {
+ return err
+ }
+ mainEntry.Digest = checksum
+ mainEntry.Offset = startOffset
+ mainEntry.EndOffset = lastOffset
+ entries := []internal.FileMetadata{mainEntry}
+ for i := 1; i < len(chunks); i++ {
+ entries = append(entries, internal.FileMetadata{
+ Type: internal.TypeChunk,
+ Name: hdr.Name,
+ ChunkOffset: chunks[i].ChunkOffset,
+ })
+ }
+ if len(chunks) > 1 {
+ for i := range chunks {
+ entries[i].ChunkSize = chunks[i].ChunkSize
+ entries[i].Offset = chunks[i].Offset
+ entries[i].ChunkDigest = chunks[i].Checksum
+ entries[i].ChunkType = chunks[i].ChunkType
+ }
+ }
+ metadata = append(metadata, entries...)
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+
+ // make sure the entire tarball is flushed to the output as it might contain
+ // some trailing zeros that affect the checksum.
+ if _, err := io.Copy(zstdWriter, its); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+
+ if err := zstdWriter.Flush(); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ zstdWriter = nil
+
+ if err := tarSplitData.zstd.Flush(); err != nil {
+ return err
+ }
+ if err := tarSplitData.zstd.Close(); err != nil {
+ return err
+ }
+ tarSplitData.zstd = nil
+
+ ts := internal.TarSplitData{
+ Data: tarSplitData.compressed.Bytes(),
+ Digest: tarSplitData.digester.Digest(),
+ UncompressedSize: tarSplitData.uncompressedCounter.Count,
+ }
+
+ return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
+}
+
+type zstdChunkedWriter struct {
+ tarSplitOut *io.PipeWriter
+ tarSplitErr chan error
+}
+
+func (w zstdChunkedWriter) Close() error {
+ errClose := w.tarSplitOut.Close()
+
+ if err := <-w.tarSplitErr; err != nil && err != io.EOF {
+ return err
+ }
+ return errClose
+}
+
+func (w zstdChunkedWriter) Write(p []byte) (int, error) {
+ select {
+ case err := <-w.tarSplitErr:
+ w.tarSplitOut.Close()
+ return 0, err
+ default:
+ return w.tarSplitOut.Write(p)
+ }
+}
+
+// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
+// compressed separately so it can be addressed separately. Idea based on CRFS:
+// https://github.com/google/crfs
+// The difference with CRFS is that the zstd compression is used instead of gzip.
+// The reason for it is that zstd supports embedding metadata ignored by the decoder
+// as part of the compressed stream.
+// A manifest json file with all the metadata is appended at the end of the tarball
+// stream, using zstd skippable frames.
+// The final file will look like:
+// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
+// Where:
+// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
+// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
+// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
+// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
+func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
+ ch := make(chan error, 1)
+ r, w := io.Pipe()
+
+ go func() {
+ ch <- writeZstdChunkedStream(out, metadata, r, level)
+ _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates.
+ r.Close()
+ close(ch)
+ }()
+
+ return zstdChunkedWriter{
+ tarSplitOut: w,
+ tarSplitErr: ch,
+ }, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level == nil {
+ l := 10
+ level = &l
+ }
+
+ return zstdChunkedWriterWithLevel(r, metadata, *level)
+}
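+
+// A minimal usage sketch from a caller's perspective (editorial; dest and
+// tarStream are hypothetical): wrap a destination writer, stream a tarball
+// through the returned WriteCloser, and the manifest skippable frames are
+// appended when the writer is closed:
+//
+//	annotations := map[string]string{}
+//	w, err := compressor.ZstdCompressor(dest, annotations, nil) // nil level defaults to 10
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(w, tarStream); err != nil {
+//		return err
+//	}
+//	if err := w.Close(); err != nil {
+//		return err
+//	}
+//	// annotations now carries the manifest offset/length metadata.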
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
new file mode 100644
index 000000000..59df6901e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2011 The Perkeep Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rollsum implements rolling checksums similar to apenwarr's bup, which
+// is similar to librsync.
+//
+// The bup project is at https://github.com/apenwarr/bup and its splitting in
+// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
+package compressor
+
+import (
+ "math/bits"
+)
+
+const (
+ windowSize = 64 // Roll assumes windowSize is a power of 2
+ charOffset = 31
+)
+
+const (
+ blobBits = 13
+ blobSize = 1 << blobBits // 8k
+)
+
+type RollSum struct {
+ s1, s2 uint32
+ window [windowSize]uint8
+ wofs int
+}
+
+func NewRollSum() *RollSum {
+ return &RollSum{
+ s1: windowSize * charOffset,
+ s2: windowSize * (windowSize - 1) * charOffset,
+ }
+}
+
+func (rs *RollSum) add(drop, add uint32) {
+ s1 := rs.s1 + add - drop
+ rs.s1 = s1
+ rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
+}
+
+// Roll adds ch to the rolling sum.
+func (rs *RollSum) Roll(ch byte) {
+ wp := &rs.window[rs.wofs]
+ rs.add(uint32(*wp), uint32(ch))
+ *wp = ch
+ rs.wofs = (rs.wofs + 1) & (windowSize - 1)
+}
+
+// OnSplit reports whether at least 13 consecutive trailing bits of
+// the current checksum are set the same way.
+func (rs *RollSum) OnSplit() bool {
+ return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
+}
+
+// OnSplitWithBits reports whether at least n consecutive trailing bits
+// of the current checksum are set the same way.
+func (rs *RollSum) OnSplitWithBits(n uint32) bool {
+ mask := (uint32(1) << n) - 1
+ return rs.s2&mask == (^uint32(0))&mask
+}
+
+func (rs *RollSum) Bits() int {
+ rsum := rs.Digest() >> (blobBits + 1)
+ return blobBits + bits.TrailingZeros32(^rsum)
+}
+
+func (rs *RollSum) Digest() uint32 {
+ return (rs.s1 << 16) | (rs.s2 & 0xffff)
+}
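+
+// An illustrative sketch of how the compressor uses this type: feed bytes
+// through Roll and cut a chunk whenever OnSplitWithBits reports a boundary
+// (with RollsumBits = 16, boundaries occur on average every ~64KiB):
+//
+//	rs := NewRollSum()
+//	for _, b := range data {
+//		rs.Roll(b)
+//		if rs.OnSplitWithBits(16) {
+//			// chunk boundary after this byte
+//		}
+//	}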
diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
new file mode 100644
index 000000000..d98cee09d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
@@ -0,0 +1,275 @@
+//go:build unix
+
+package dump
+
+import (
+ "bufio"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ ESCAPE_STANDARD = 0
+ NOESCAPE_SPACE = 1 << iota
+ ESCAPE_EQUAL
+ ESCAPE_LONE_DASH
+)
+
+func escaped(val []byte, escape int) string {
+ noescapeSpace := escape&NOESCAPE_SPACE != 0
+ escapeEqual := escape&ESCAPE_EQUAL != 0
+ escapeLoneDash := escape&ESCAPE_LONE_DASH != 0
+
+ if escapeLoneDash && len(val) == 1 && val[0] == '-' {
+ return fmt.Sprintf("\\x%.2x", val[0])
+ }
+
+ // This is intended to match the C isprint API with LC_CTYPE=C
+ isprint := func(c byte) bool {
+ return c >= 32 && c < 127
+ }
+ // This is intended to match the C isgraph API with LC_CTYPE=C
+ isgraph := func(c byte) bool {
+ return c > 32 && c < 127
+ }
+
+ var result string
+ for _, c := range []byte(val) {
+ hexEscape := false
+ var special string
+
+ switch c {
+ case '\\':
+ special = "\\\\"
+ case '\n':
+ special = "\\n"
+ case '\r':
+ special = "\\r"
+ case '\t':
+ special = "\\t"
+ case '=':
+ hexEscape = escapeEqual
+ default:
+ if noescapeSpace {
+ hexEscape = !isprint(c)
+ } else {
+ hexEscape = !isgraph(c)
+ }
+ }
+
+ if special != "" {
+ result += special
+ } else if hexEscape {
+ result += fmt.Sprintf("\\x%.2x", c)
+ } else {
+ result += string(c)
+ }
+ }
+ return result
+}
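+
+// For illustration: escaped([]byte("a b\tc=d"), ESCAPE_EQUAL) returns
+// `a\x20b\tc\x3dd` (space hex-escaped, tab as "\t", '=' hex-escaped),
+// while NOESCAPE_SPACE keeps the space literal.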
+
+func escapedOptional(val []byte, escape int) string {
+ if len(val) == 0 {
+ return "-"
+ }
+ return escaped(val, escape)
+}
+
+func getStMode(mode uint32, typ string) (uint32, error) {
+ switch typ {
+ case internal.TypeReg, internal.TypeLink:
+ mode |= unix.S_IFREG
+ case internal.TypeChar:
+ mode |= unix.S_IFCHR
+ case internal.TypeBlock:
+ mode |= unix.S_IFBLK
+ case internal.TypeDir:
+ mode |= unix.S_IFDIR
+ case internal.TypeFifo:
+ mode |= unix.S_IFIFO
+ case internal.TypeSymlink:
+ mode |= unix.S_IFLNK
+ default:
+ return 0, fmt.Errorf("unknown type %s", typ)
+ }
+ return mode, nil
+}
+
+func sanitizeName(name string) string {
+ path := filepath.Clean(name)
+ if path == "." {
+ path = "/"
+ } else if path[0] != '/' {
+ path = "/" + path
+ }
+ return path
+}
+
+func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
+ path := sanitizeName(entry.Name)
+
+ parent := filepath.Dir(path)
+ if _, found := added[parent]; !found && path != "/" {
+ parentEntry := &internal.FileMetadata{
+ Name: parent,
+ Type: internal.TypeDir,
+ Mode: 0o755,
+ }
+ if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
+ return err
+ }
+ }
+ if e, found := added[path]; found {
+ // if the entry was already added, make sure it has the same data
+ if !reflect.DeepEqual(*e, *entry) {
+ return fmt.Errorf("entry %q already added with different data", path)
+ }
+ return nil
+ }
+ added[path] = entry
+
+ if _, err := fmt.Fprint(out, escaped([]byte(path), ESCAPE_STANDARD)); err != nil {
+ return err
+ }
+
+ nlinks := links[entry.Name] + links[entry.Linkname] + 1
+ link := ""
+ if entry.Type == internal.TypeLink {
+ link = "@"
+ }
+
+ rdev := unix.Mkdev(uint32(entry.Devmajor), uint32(entry.Devminor))
+
+ entryTime := entry.ModTime
+ if entryTime == nil {
+ t := time.Unix(0, 0)
+ entryTime = &t
+ }
+
+ mode, err := getStMode(uint32(entry.Mode), entry.Type)
+ if err != nil {
+ return err
+ }
+
+ if _, err := fmt.Fprintf(out, " %d %s%o %d %d %d %d %d.%d ", entry.Size,
+ link, mode,
+ nlinks, entry.UID, entry.GID, rdev,
+ entryTime.Unix(), entryTime.Nanosecond()); err != nil {
+ return err
+ }
+
+ var payload string
+ if entry.Linkname != "" {
+ if entry.Type == internal.TypeSymlink {
+ payload = entry.Linkname
+ } else {
+ payload = sanitizeName(entry.Linkname)
+ }
+ } else {
+ if len(entry.Digest) > 10 {
+ d := strings.Replace(entry.Digest, "sha256:", "", 1)
+ payload = d[:2] + "/" + d[2:]
+ }
+ }
+
+ if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
+ return err
+ }
+
+ /* inline content. */
+ if _, err := fmt.Fprint(out, " -"); err != nil {
+ return err
+ }
+
+ /* store the digest. */
+ if _, err := fmt.Fprint(out, " "); err != nil {
+ return err
+ }
+ digest := verityDigests[payload]
+ if _, err := fmt.Fprint(out, escapedOptional([]byte(digest), ESCAPE_LONE_DASH)); err != nil {
+ return err
+ }
+
+ for k, vEncoded := range entry.Xattrs {
+ v, err := base64.StdEncoding.DecodeString(vEncoded)
+ if err != nil {
+ return fmt.Errorf("decode xattr %q: %w", k, err)
+ }
+ name := escaped([]byte(k), ESCAPE_EQUAL)
+
+ value := escaped(v, ESCAPE_EQUAL)
+ if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil {
+ return err
+ }
+ }
+ if _, err := fmt.Fprint(out, "\n"); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
+func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
+ toc, ok := tocI.(*internal.TOC)
+ if !ok {
+ return nil, fmt.Errorf("invalid TOC type")
+ }
+ pipeR, pipeW := io.Pipe()
+ go func() {
+ closed := false
+ w := bufio.NewWriter(pipeW)
+ defer func() {
+ if !closed {
+ w.Flush()
+ pipeW.Close()
+ }
+ }()
+
+ links := make(map[string]int)
+ added := make(map[string]*internal.FileMetadata)
+ for _, e := range toc.Entries {
+ if e.Linkname == "" {
+ continue
+ }
+ if e.Type == internal.TypeSymlink {
+ continue
+ }
+ links[e.Linkname] = links[e.Linkname] + 1
+ }
+
+ if len(toc.Entries) == 0 {
+ root := &internal.FileMetadata{
+ Name: "/",
+ Type: internal.TypeDir,
+ Mode: 0o755,
+ }
+
+ if err := dumpNode(w, added, links, verityDigests, root); err != nil {
+ pipeW.CloseWithError(err)
+ closed = true
+ return
+ }
+ }
+
+ for _, e := range toc.Entries {
+ if e.Type == internal.TypeChunk {
+ continue
+ }
+ if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
+ pipeW.CloseWithError(err)
+ closed = true
+ return
+ }
+ }
+ }()
+ return pipeR, nil
+}
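+
+// An illustrative example of a single line in this format (hypothetical
+// values, digest elided): a 100-byte regular file would dump as
+//
+//	/usr/bin/foo 100 100644 1 0 0 0 1609459200.0 ab/cd... - -
+//
+// i.e. path, size, octal st_mode, nlink, uid, gid, rdev, mtime, the content
+// digest as a payload path, "-" for no inline content and "-" for no verity
+// digest, followed by any xattrs as key=value pairs.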
diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
new file mode 100644
index 000000000..e26e5e5c5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
@@ -0,0 +1,605 @@
+package chunked
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ driversCopy "github.com/containers/storage/drivers/copy"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked/internal"
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "github.com/vbatts/tar-split/archive/tar"
+ "golang.org/x/sys/unix"
+)
+
+// procPathForFile returns an absolute path in /proc which
+// refers to the file; see procPathForFd.
+func procPathForFile(f *os.File) string {
+ return procPathForFd(int(f.Fd()))
+}
+
+// procPathForFd returns an absolute path in /proc which
+// refers to the file; this allows passing a file descriptor
+// in places that don't accept a file descriptor.
+func procPathForFd(fd int) string {
+ return fmt.Sprintf("/proc/self/fd/%d", fd)
+}
+
+// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
+// are not part of the TOC document.
+// Type: TypeChunk entries are stored in the chunks field; the primary [fileMetadata] entries never use TypeChunk.
+type fileMetadata struct {
+ internal.FileMetadata
+
+ // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
+ chunks []*internal.FileMetadata
+
+ // skipSetAttrs is set when the file attributes must not be
+ // modified, e.g. it is a hard link from a different source,
+ // or a composefs file.
+ skipSetAttrs bool
+}
+
+func doHardLink(dirfd, srcFd int, destFile string) error {
+ destDir, destBase := filepath.Split(destFile)
+ destDirFd := dirfd
+ if destDir != "" && destDir != "." {
+ f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ destDirFd = int(f.Fd())
+ }
+
+ doLink := func() error {
+ // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
+ // /proc/self/fd doesn't and can be used with rootless.
+ srcPath := procPathForFd(srcFd)
+ err := unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
+ if err != nil {
+ return &fs.PathError{Op: "linkat", Path: destFile, Err: err}
+ }
+ return nil
+ }
+
+ err := doLink()
+
+ // if the destination exists, unlink it first and try again
+ if err != nil && os.IsExist(err) {
+ if err := unix.Unlinkat(destDirFd, destBase, 0); err != nil {
+ return err
+ }
+ return doLink()
+ }
+ return err
+}
+
+func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
+ destFile := fileMetadata.Name
+ src := procPathForFd(srcFd)
+ st, err := os.Stat(src)
+ if err != nil {
+ return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
+ }
+
+ copyWithFileRange, copyWithFileClone := true, true
+
+ if useHardLinks {
+ err := doHardLink(dirfd, srcFd, destFile)
+ if err == nil {
+ // if the file was deduplicated with a hard link, skip overriding file metadata.
+ fileMetadata.skipSetAttrs = true
+ return nil, st.Size(), nil
+ }
+ }
+
+ // If the destination file already exists, we shouldn't blow it away
+ dstFile, err := openFileUnderRoot(dirfd, destFile, newFileFlags, mode)
+ if err != nil {
+ return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
+ }
+
+ err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
+ if err != nil {
+ dstFile.Close()
+ return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
+ }
+ return dstFile, st.Size(), nil
+}
+
+func timeToTimespec(time *time.Time) (ts unix.Timespec) {
+ if time == nil || time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return unix.NsecToTimespec(time.UnixNano())
+}
+
+// chown changes the owner and group of the file at the specified path under the directory
+// pointed by dirfd.
+// If nofollow is true, the function will not follow symlinks.
+// If path is empty, the function will change the owner and group of the file descriptor.
+// absolutePath is the absolute path of the file, used only for error messages.
+func chown(dirfd int, path string, uid, gid int, nofollow bool, absolutePath string) error {
+ var err error
+ flags := 0
+ if nofollow {
+ flags |= unix.AT_SYMLINK_NOFOLLOW
+ } else if path == "" {
+ flags |= unix.AT_EMPTY_PATH
+ }
+ err = unix.Fchownat(dirfd, path, uid, gid, flags)
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, syscall.EINVAL) {
+ return fmt.Errorf(`potentially insufficient UIDs or GIDs available in the user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, path, err)
+ }
+ return &fs.PathError{Op: "fchownat", Path: absolutePath, Err: err}
+}
+
+// setFileAttrs sets the file attributes for file given metadata
+func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
+ if metadata.skipSetAttrs {
+ return nil
+ }
+ if file == nil {
+ return errors.New("invalid file")
+ }
+ fd := int(file.Fd())
+
+ t, err := typeToTarType(metadata.Type)
+ if err != nil {
+ return err
+ }
+
+ // If it is a symlink, force to use the path
+ if t == tar.TypeSymlink {
+ usePath = true
+ }
+
+ baseName := ""
+ if usePath {
+ dirName := filepath.Dir(metadata.Name)
+ if dirName != "" {
+ parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+ defer parentFd.Close()
+
+ dirfd = int(parentFd.Fd())
+ }
+ baseName = filepath.Base(metadata.Name)
+ }
+
+ doChown := func() error {
+ var err error
+ if usePath {
+ err = chown(dirfd, baseName, metadata.UID, metadata.GID, true, metadata.Name)
+ } else {
+ err = chown(fd, "", metadata.UID, metadata.GID, false, metadata.Name)
+ }
+ if options.IgnoreChownErrors {
+ return nil
+ }
+ return err
+ }
+
+ doSetXattr := func(k string, v []byte) error {
+ err := unix.Fsetxattr(fd, k, v, 0)
+ if err != nil {
+ return &fs.PathError{Op: "fsetxattr", Path: metadata.Name, Err: err}
+ }
+ return nil
+ }
+
+ doUtimes := func() error {
+ ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
+ var err error
+ if usePath {
+ err = unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
+ } else {
+ err = unix.UtimesNanoAt(unix.AT_FDCWD, procPathForFd(fd), ts, 0)
+ }
+ if err != nil {
+ return &fs.PathError{Op: "utimensat", Path: metadata.Name, Err: err}
+ }
+ return nil
+ }
+
+ doChmod := func() error {
+ var err error
+ op := ""
+ if usePath {
+ err = unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+ op = "fchmodat"
+ } else {
+ err = unix.Fchmod(fd, uint32(mode))
+ op = "fchmod"
+ }
+ if err != nil {
+ return &fs.PathError{Op: op, Path: metadata.Name, Err: err}
+ }
+ return nil
+ }
+
+ if err := doChown(); err != nil {
+ return err
+ }
+
+ canIgnore := func(err error) bool {
+ return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
+ }
+
+ for k, v := range metadata.Xattrs {
+ if _, found := xattrsToIgnore[k]; found {
+ continue
+ }
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return fmt.Errorf("decode xattr %q: %w", v, err)
+ }
+ if err := doSetXattr(k, data); !canIgnore(err) {
+ return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
+ }
+ }
+
+ if err := doUtimes(); !canIgnore(err) {
+ return err
+ }
+
+ if err := doChmod(); !canIgnore(err) {
+ return err
+ }
+ return nil
+}
+
+func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+ root := procPathForFd(dirfd)
+
+ targetRoot, err := os.Readlink(root)
+ if err != nil {
+ return -1, err
+ }
+
+ hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
+
+ var fd int
+ // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
+ // last component as the path to openat().
+ if hasNoFollow {
+ dirName, baseName := filepath.Split(name)
+ if dirName != "" && dirName != "." {
+ newRoot, err := securejoin.SecureJoin(root, dirName)
+ if err != nil {
+ return -1, err
+ }
+ root = newRoot
+ }
+
+ parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return -1, &fs.PathError{Op: "open", Path: root, Err: err}
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err = unix.Openat(parentDirfd, baseName, int(flags), uint32(mode))
+ if err != nil {
+ return -1, &fs.PathError{Op: "openat", Path: name, Err: err}
+ }
+ } else {
+ newPath, err := securejoin.SecureJoin(root, name)
+ if err != nil {
+ return -1, err
+ }
+ fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+ if err != nil {
+ return -1, &fs.PathError{Op: "openat", Path: newPath, Err: err}
+ }
+ }
+
+ target, err := os.Readlink(procPathForFd(fd))
+ if err != nil {
+ unix.Close(fd)
+ return -1, err
+ }
+
+ // Add an additional check to make sure the opened fd is inside the rootfs
+ if !strings.HasPrefix(target, targetRoot) {
+ unix.Close(fd)
+ return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name)
+ }
+
+ return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+ how := unix.OpenHow{
+ Flags: flags,
+ Mode: uint64(mode & 0o7777),
+ Resolve: unix.RESOLVE_IN_ROOT,
+ }
+ fd, err := unix.Openat2(dirfd, name, &how)
+ if err != nil {
+ return -1, &fs.PathError{Op: "openat2", Path: name, Err: err}
+ }
+ return fd, nil
+}
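+
+// Editorial note: RESOLVE_IN_ROOT makes the kernel treat dirfd as the root
+// for the entire path walk, so symlinks and ".." components in name cannot
+// escape the checkout directory; openFileUnderRootFallback above emulates
+// the same property in userspace via securejoin.SecureJoin.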
+
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel,
+// so that we avoid trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if it is not
+// supported, falls back to a userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+ var fd int
+ var err error
+ if name == "" {
+ fd, err := unix.Dup(dirfd)
+ if err != nil {
+ return -1, fmt.Errorf("failed to duplicate file descriptor %d: %w", dirfd, err)
+ }
+ return fd, nil
+ }
+ if atomic.LoadInt32(&skipOpenat2) > 0 {
+ fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+ } else {
+ fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+ // If the function failed with ENOSYS, switch off the support for openat2
+ // and fall back to using securejoin.
+ if err != nil && errors.Is(err, unix.ENOSYS) {
+ atomic.StoreInt32(&skipOpenat2, 1)
+ fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+ }
+ }
+ return fd, err
+}
+
+// openFileUnderRoot safely opens a file under the specified root directory using openat2
+// dirfd is an open file descriptor to the target checkout directory.
+// name is the path to open relative to dirfd.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
+func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) (*os.File, error) {
+ fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+
+ hasCreate := (flags & unix.O_CREAT) != 0
+ if errors.Is(err, unix.ENOENT) && hasCreate {
+ parent := filepath.Dir(name)
+ if parent != "" {
+ newDirfd, err2 := openOrCreateDirUnderRoot(dirfd, parent, 0)
+ if err2 == nil {
+ defer newDirfd.Close()
+ fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+ }
+ }
+ }
+ return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
+}
+
+// openOrCreateDirUnderRoot safely opens a directory, or creates it if it is missing.
+// dirfd is an open file descriptor to the target checkout directory.
+// name is the path to open relative to dirfd.
+// mode specifies the mode to use for newly created files.
+func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) {
+ fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+
+ if errors.Is(err, unix.ENOENT) {
+ parent := filepath.Dir(name)
+ if parent != "" {
+ pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
+ if err2 != nil {
+ return nil, err
+ }
+ defer pDir.Close()
+
+ baseName := filepath.Base(name)
+
+ if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, uint32(mode)); err2 != nil {
+ return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2}
+ }
+
+ fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+ }
+ }
+ return nil, err
+}
+
+// appendHole creates a hole with the specified size at the open fd.
+// fd is the open file descriptor.
+// name is the path to use for error messages.
+// size is the size of the hole to create.
+func appendHole(fd int, name string, size int64) error {
+ off, err := unix.Seek(fd, size, unix.SEEK_CUR)
+ if err != nil {
+ return &fs.PathError{Op: "seek", Path: name, Err: err}
+ }
+ // Make sure the file size is changed. It might be the last hole and no other data written afterwards.
+ if err := unix.Ftruncate(fd, off); err != nil {
+ return &fs.PathError{Op: "ftruncate", Path: name, Err: err}
+ }
+ return nil
+}
+
+func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
+ parent, base := filepath.Split(name)
+ parentFd := dirfd
+ if parent != "" && parent != "." {
+ parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
+ if err != nil {
+ return err
+ }
+ defer parentFile.Close()
+ parentFd = int(parentFile.Fd())
+ }
+
+ if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
+ if !os.IsExist(err) {
+ return &fs.PathError{Op: "mkdirat", Path: name, Err: err}
+ }
+ }
+
+ file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return setFileAttrs(dirfd, file, mode, metadata, options, false)
+}
+
+func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
+ sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+ defer sourceFile.Close()
+
+ err = doHardLink(dirfd, int(sourceFile.Fd()), metadata.Name)
+ if err != nil {
+ return err
+ }
+
+ newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
+ if err != nil {
+ // If the target is a symlink, open the file with O_PATH.
+ if errors.Is(err, unix.ELOOP) {
+ newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_PATH|unix.O_NOFOLLOW, 0)
+ if err != nil {
+ return err
+ }
+ defer newFile.Close()
+
+ return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
+ }
+ return err
+ }
+ defer newFile.Close()
+
+ return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
+}
+
+func safeSymlink(dirfd int, metadata *fileMetadata) error {
+ destDir, destBase := filepath.Split(metadata.Name)
+ destDirFd := dirfd
+ if destDir != "" && destDir != "." {
+ f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ destDirFd = int(f.Fd())
+ }
+
+ if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
+ return &fs.PathError{Op: "symlinkat", Path: metadata.Name, Err: err}
+ }
+ return nil
+}
+
+type whiteoutHandler struct {
+ Dirfd int
+ Root string
+}
+
+func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
+ file, err := openOrCreateDirUnderRoot(d.Dirfd, path, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
+ return &fs.PathError{Op: "fsetxattr", Path: path, Err: err}
+ }
+ return nil
+}
+
+func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
+ dir, base := filepath.Split(path)
+ dirfd := d.Dirfd
+ if dir != "" && dir != "." {
+ dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+
+ dirfd = int(dir.Fd())
+ }
+
+ if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
+ return &fs.PathError{Op: "mknodat", Path: path, Err: err}
+ }
+
+ return nil
+}
+
+func (d whiteoutHandler) Chown(path string, uid, gid int) error {
+ file, err := openFileUnderRoot(d.Dirfd, path, unix.O_PATH, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return chown(int(file.Fd()), "", uid, gid, false, path)
+}
+
+type readerAtCloser interface {
+ io.ReaderAt
+ io.Closer
+}
+
+// seekableFile is a struct that wraps an *os.File to provide an ImageSourceSeekable.
+type seekableFile struct {
+ reader readerAtCloser
+}
+
+func (f *seekableFile) Close() error {
+ return f.reader.Close()
+}
+
+func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+
+ go func() {
+ for _, chunk := range chunks {
+ streams <- io.NopCloser(io.NewSectionReader(f.reader, int64(chunk.Offset), int64(chunk.Length)))
+ }
+ close(streams)
+ close(errs)
+ }()
+
+ return streams, errs, nil
+}
+
+func newSeekableFile(reader readerAtCloser) *seekableFile {
+ return &seekableFile{reader: reader}
+}
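+
+// Usage sketch (illustrative only; it assumes a seekableFile f built with
+// newSeekableFile): drain the streams channel, then drain errs:
+//
+//	streams, errs, _ := f.GetBlobAt([]ImageSourceChunk{{Offset: 0, Length: 512}})
+//	for r := range streams {
+//		_, _ = io.Copy(io.Discard, r)
+//		r.Close()
+//	}
+//	for err := range errs {
+//		_ = err // handle the error
+//	}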
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
new file mode 100644
index 000000000..2a24e4bf2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -0,0 +1,324 @@
+package internal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/archive"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+// TOC is short for Table of Contents and is used by the zstd:chunked
+// file format to effectively add an overall index into the contents
+// of a tarball; it also includes file metadata.
+type TOC struct {
+ // Version is currently expected to be 1
+ Version int `json:"version"`
+ // Entries is the list of file metadata in this TOC.
+ // The ordering in this array currently defaults to being the same
+ // as that of the tar stream; however, this should not be relied on.
+ Entries []FileMetadata `json:"entries"`
+ // TarSplitDigest is the checksum of the "tar-split" data which
+ // is included as a distinct skippable zstd frame before the TOC.
+ TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
+}
+
+// FileMetadata is an entry in the TOC that includes both generic file metadata
+// that duplicates what can be found in the tar header (and should match), but
+// also special/custom content (see below).
+//
+// Regular files may optionally be represented as a sequence of "chunks",
+// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
+// are heuristically determined to increase chance of chunk matching / reuse
+// similar to rsync). In that case, the regular file is represented
+// as an initial TypeReg entry (with all metadata for the file as a whole)
+// immediately followed by zero or more TypeChunk entries (containing only Type,
+// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
+// fields are relevant in all of these entries, including the initial
+// TypeReg one.
+//
+// Note that the metadata here, when fetched by a zstd:chunked aware client,
+// is used instead of that in the tar stream. The contents of the tar stream
+// are not used in this scenario.
+type FileMetadata struct {
+ // If you add any fields, update ensureFileMetadataMatches as well!
+
+ // The metadata below largely duplicates that in the tar headers.
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Linkname string `json:"linkName,omitempty"`
+ Mode int64 `json:"mode,omitempty"`
+ Size int64 `json:"size,omitempty"`
+ UID int `json:"uid,omitempty"`
+ GID int `json:"gid,omitempty"`
+ ModTime *time.Time `json:"modtime,omitempty"`
+ AccessTime *time.Time `json:"accesstime,omitempty"`
+ ChangeTime *time.Time `json:"changetime,omitempty"`
+ Devmajor int64 `json:"devMajor,omitempty"`
+ Devminor int64 `json:"devMinor,omitempty"`
+ Xattrs map[string]string `json:"xattrs,omitempty"`
+ // Digest is a hexadecimal sha256 checksum of the file contents; it
+ // is empty for empty files
+ Digest string `json:"digest,omitempty"`
+ Offset int64 `json:"offset,omitempty"`
+ EndOffset int64 `json:"endOffset,omitempty"`
+
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+ ChunkType string `json:"chunkType,omitempty"`
+}
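+
+// An illustrative (hypothetical) TOC entry for a small regular file, with
+// made-up offsets and a truncated digest:
+//
+//	{"type": "reg", "name": "etc/hostname", "mode": 420, "size": 6,
+//	 "digest": "sha256:...", "offset": 1234, "endOffset": 1290}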
+
+const (
+ ChunkTypeData = ""
+ ChunkTypeZeros = "zeros"
+)
+
+const (
+ // The following types correspond to regular types of entries that can
+ // appear in a tar archive.
+ TypeReg = "reg"
+ TypeLink = "hardlink"
+ TypeChar = "char"
+ TypeBlock = "block"
+ TypeDir = "dir"
+ TypeFifo = "fifo"
+ TypeSymlink = "symlink"
+ // TypeChunk is special; in zstd:chunked not only are files individually
+ // compressed and indexable, there is a "rolling checksum" used to compute
+ // "chunks" of individual file contents, that are also added to the TOC
+ TypeChunk = "chunk"
+)
+
+var TarTypes = map[byte]string{
+ tar.TypeReg: TypeReg,
+ tar.TypeLink: TypeLink,
+ tar.TypeChar: TypeChar,
+ tar.TypeBlock: TypeBlock,
+ tar.TypeDir: TypeDir,
+ tar.TypeFifo: TypeFifo,
+ tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+ r, found := TarTypes[t]
+ if !found {
+ return "", fmt.Errorf("unknown tarball type: %v", t)
+ }
+ return r, nil
+}
+
+const (
+ // ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest.
+ ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
+ // ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
+ // contents which are embedded as a skippable zstd frame. It has a format of
+ // four decimal integers separated by `:` as follows:
+ // <offset>:<length>:<uncompressed length>:<type>
+ // The <type> is ManifestTypeCRFS which should have the value `1`.
+ ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+ // TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
+ // contents which are embedded as a skippable zstd frame. It has a format of
+ // three decimal integers separated by `:` as follows:
+ // <offset>:<length>:<uncompressed length>
+ TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
+
+ // TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead.
+ // The value is retained here as a constant as a historical reference for older zstd:chunked images.
+ // TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
+
+ // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+ ManifestTypeCRFS = 1
+
+ // FooterSizeSupported is the footer size supported by this implementation.
+ // Newer versions of the image format might increase this value, so reject
+ // any version that is not supported.
+ FooterSizeSupported = 64
+)
+
+var (
+ // skippableFrameMagic is the magic number (0x184D2A50, little-endian) that
+ // opens a zstd skippable frame; decoders skip over such frames entirely.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2
+ skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+
+ ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
+ if _, err := dest.Write(skippableFrameMagic); err != nil {
+ return err
+ }
+
+ size := make([]byte, 4)
+ binary.LittleEndian.PutUint32(size, uint32(len(data)))
+ if _, err := dest.Write(size); err != nil {
+ return err
+ }
+ if _, err := dest.Write(data); err != nil {
+ return err
+ }
+ return nil
+}
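+
+// The frame written above has the following on-disk layout (a sketch of the
+// RFC 8478 skippable-frame format):
+//
+//	magic (4 bytes, 0x184D2A50 LE) | data size (4 bytes, LE) | data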
+
+type TarSplitData struct {
+ Data []byte
+ Digest digest.Digest
+ UncompressedSize int64
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
+ // 8 is the size of the zstd skippable frame header + the frame size
+ const zstdSkippableFrameHeader = 8
+ manifestOffset := offset + zstdSkippableFrameHeader
+
+ toc := TOC{
+ Version: 1,
+ Entries: metadata,
+ TarSplitDigest: tarSplitData.Digest,
+ }
+
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ // Generate the manifest
+ manifest, err := json.Marshal(toc)
+ if err != nil {
+ return err
+ }
+
+ var compressedBuffer bytes.Buffer
+ zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+ if err != nil {
+ return err
+ }
+ if _, err := zstdWriter.Write(manifest); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ compressedManifest := compressedBuffer.Bytes()
+
+ manifestDigester := digest.Canonical.Digester()
+ manifestChecksum := manifestDigester.Hash()
+ if _, err := manifestChecksum.Write(compressedManifest); err != nil {
+ return err
+ }
+
+ outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
+ outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
+ if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
+ return err
+ }
+
+ tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
+ outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
+ if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
+ return err
+ }
+
+ footer := ZstdChunkedFooterData{
+ ManifestType: uint64(ManifestTypeCRFS),
+ Offset: manifestOffset,
+ LengthCompressed: uint64(len(compressedManifest)),
+ LengthUncompressed: uint64(len(manifest)),
+ OffsetTarSplit: uint64(tarSplitOffset),
+ LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
+ LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
+ }
+
+ manifestDataLE := footerDataToBlob(footer)
+
+ return appendZstdSkippableFrame(dest, manifestDataLE)
+}
+
+func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+ el := zstd.EncoderLevelFromZstd(level)
+ return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer.
+// This footer exists to make the blobs self-describing; our implementation
+// never reads it:
+// Partial pull security hinges on the TOC digest, and that exists as a layer annotation;
+// so we are relying on the layer annotations anyway, and doing so means we can avoid
+// a round-trip to fetch this binary footer.
+type ZstdChunkedFooterData struct {
+ ManifestType uint64
+
+ Offset uint64
+ LengthCompressed uint64
+ LengthUncompressed uint64
+
+ OffsetTarSplit uint64
+ LengthCompressedTarSplit uint64
+ LengthUncompressedTarSplit uint64
+ ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose.
+}
+
+func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
+ // Store the offset to the manifest and its size in LE order
+ manifestDataLE := make([]byte, FooterSizeSupported)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit)
+ copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic)
+
+ return manifestDataLE
+}
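+
+// For reference, the 64-byte blob produced above consists of eight 8-byte
+// little-endian slots, at these byte offsets:
+//
+//	0:  Offset (of the TOC)
+//	8:  LengthCompressed
+//	16: LengthUncompressed
+//	24: ManifestType
+//	32: OffsetTarSplit
+//	40: LengthCompressedTarSplit
+//	48: LengthUncompressedTarSplit
+//	56: ZstdChunkedFrameMagic (8 bytes)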
+
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+ if t == nil || t.IsZero() {
+ return nil
+ }
+ return t
+}
+
+// NewFileMetadata creates a basic FileMetadata entry for hdr.
+// The caller must set DigestOffset/EndOffset, and the Chunk* values, separately.
+func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) {
+ typ, err := GetType(hdr.Typeflag)
+ if err != nil {
+ return FileMetadata{}, err
+ }
+ xattrs := make(map[string]string)
+ for k, v := range hdr.PAXRecords {
+ xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr)
+ if !ok {
+ continue
+ }
+ xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ return FileMetadata{
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: timeIfNotZero(&hdr.ModTime),
+ AccessTime: timeIfNotZero(&hdr.AccessTime),
+ ChangeTime: timeIfNotZero(&hdr.ChangeTime),
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ }, nil
+}
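+
+// For example (illustrative values, assuming archive.PaxSchilyXattr is the
+// "SCHILY.xattr." prefix): a PAX record "SCHILY.xattr.user.comment" = "bar"
+// is stored as Xattrs["user.comment"] = "YmFy", the base64 encoding of "bar".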
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go
new file mode 100644
index 000000000..cc9ee85bf
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go
@@ -0,0 +1,44 @@
+package chunked
+
+import (
+ "io"
+)
+
+// ImageSourceChunk is a portion of a blob.
+type ImageSourceChunk struct {
+ Offset uint64
+ Length uint64
+}
+
+// ImageSourceSeekable is an image source that allows fetching chunks of the entire blob.
+type ImageSourceSeekable interface {
+ // GetBlobAt returns a stream for each specified chunk of the blob.
+ GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// ErrBadRequest is returned when the request is not valid
+type ErrBadRequest struct { //nolint: errname
+}
+
+func (e ErrBadRequest) Error() string {
+ return "bad request"
+}
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type that
+// suggests to the caller that a fallback mechanism can be used
+// instead of a hard failure.
+type ErrFallbackToOrdinaryLayerDownload struct {
+ Err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+ return c.Err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+ return c.Err
+}
+
+func newErrFallbackToOrdinaryLayerDownload(err error) error {
+ return ErrFallbackToOrdinaryLayerDownload{Err: err}
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
new file mode 100644
index 000000000..8ecbfb982
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -0,0 +1,1736 @@
+package chunked
+
+import (
+ archivetar "archive/tar"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ storage "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/chunked/toc"
+ "github.com/containers/storage/pkg/fsverity"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/klauspost/compress/zstd"
+ "github.com/klauspost/pgzip"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/archive/tar"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ maxNumberMissingChunks = 1024
+ autoMergePartsThreshold = 1024 // if the gap between two ranges is below this threshold, automatically merge them.
+ newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
+ bigDataKey = "zstd-chunked-manifest"
+ chunkedData = "zstd-chunked-data"
+ chunkedLayerDataKey = "zstd-chunked-layer-data"
+ tocKey = "toc"
+ fsVerityDigestsKey = "fs-verity-digests"
+
+ fileTypeZstdChunked = iota
+ fileTypeEstargz
+ fileTypeNoCompression
+ fileTypeHole
+
+ copyGoRoutines = 32
+)
+
+type compressedFileType int
+
+type chunkedDiffer struct {
+ stream ImageSourceSeekable
+ manifest []byte
+ toc *internal.TOC // The parsed contents of manifest, or nil if not yet available
+ tarSplit []byte
+ layersCache *layersCache
+ tocOffset int64
+ fileType compressedFileType
+
+ copyBuffer []byte
+
+ gzipReader *pgzip.Reader
+ zstdReader *zstd.Decoder
+ rawReader io.Reader
+
+ // tocDigest is the digest of the TOC document when the layer
+ // is partially pulled.
+ tocDigest digest.Digest
+
+ // convertedToZstdChunked is set to true if the layer needs to
+ // be converted to the zstd:chunked format before it can be
+ // handled.
+ convertToZstdChunked bool
+
+ // skipValidation is set to true if the individual files in
+ // the layer are trusted and should not be validated.
+ skipValidation bool
+
+ // blobDigest is the digest of the whole compressed layer. It is used if
+ // convertToZstdChunked to validate a layer when it is converted since there
+ // is no TOC referenced by the manifest.
+ blobDigest digest.Digest
+
+ blobSize int64
+ uncompressedTarSize int64 // -1 if unknown
+
+ pullOptions map[string]string
+
+ useFsVerity graphdriver.DifferFsVerity
+ fsVerityDigests map[string]string
+ fsVerityMutex sync.Mutex
+}
+
+var xattrsToIgnore = map[string]interface{}{
+ "security.selinux": true,
+}
+
+// chunkedLayerData is used to store additional information about the layer
+type chunkedLayerData struct {
+ Format graphdriver.DifferOutputFormat `json:"format"`
+}
+
+func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
+ diff, err := archive.DecompressStream(payload)
+ if err != nil {
+ return 0, nil, "", nil, err
+ }
+
+ fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
+ if err != nil {
+ return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
+ }
+
+ f := os.NewFile(uintptr(fd), destDirectory)
+
+ newAnnotations := make(map[string]string)
+ level := 1
+ chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level)
+ if err != nil {
+ f.Close()
+ return 0, nil, "", nil, err
+ }
+
+ convertedOutputDigester := digest.Canonical.Digester()
+ copied, err := io.CopyBuffer(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff, c.copyBuffer)
+ if err != nil {
+ f.Close()
+ return 0, nil, "", nil, err
+ }
+ if err := chunked.Close(); err != nil {
+ f.Close()
+ return 0, nil, "", nil, err
+ }
+
+ return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil
+}
+
+// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
+// retry the operation with a different method.
+func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+ pullOptions := store.PullOptions()
+
+ if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
+ // If convertImages is set, the two options disagree about whether fallback is permissible.
+ // Right now, we enable it, but that's not a promise; rather, such a configuration should ideally be rejected.
+ return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
+ }
+ // convertImages also serves as a "must not fall back to non-partial pull" option (?!)
+ convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+
+ graphDriver, err := store.GraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
+ if convertImages {
+ return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
+ }
+ return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
+ }
+
+ differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
+ if err != nil {
+ if !canFallback {
+ return nil, err
+ }
+ // If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
+ if convertImages {
+ logrus.Debugf("Created differ to convert blob %q", blobDigest)
+ return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+ }
+ return nil, newErrFallbackToOrdinaryLayerDownload(err)
+ }
+
+ return differ, nil
+}
+
+// getProperDiffer is an implementation detail of GetDiffer.
+// It returns a "proper" differ (not a convert_images one) if possible.
+// On error, the second return value is true if a fallback to an alternative (either the
+// makeConvertFromRawDiffer differ, or a non-partial pull) is permissible.
+func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
+ zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
+ estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
+
+ switch {
+ case hasZstdChunkedTOC && hasEstargzTOC:
+ return nil, false, errors.New("both zstd:chunked and eStargz TOC found")
+
+ case hasZstdChunkedTOC:
+ zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
+ if err != nil {
+ return nil, false, err
+ }
+ differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
+ if err != nil {
+ logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
+ // If the error is a bad request to the server, then signal to the caller that it can try a different method.
+ var badRequestErr ErrBadRequest
+ return nil, errors.As(err, &badRequestErr), err
+ }
+ logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
+ return differ, false, nil
+
+ case hasEstargzTOC:
+ estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
+ if err != nil {
+ return nil, false, err
+ }
+ differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
+ if err != nil {
+ logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
+ // If the error is a bad request to the server, then signal to the caller that it can try a different method.
+ var badRequestErr ErrBadRequest
+ return nil, errors.As(err, &badRequestErr), err
+ }
+ logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
+ return differ, false, nil
+
+ default: // no TOC
+ convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+ if !convertImages {
+ return nil, true, errors.New("no TOC found and convert_images is not configured")
+ }
+ return nil, true, errors.New("no TOC found")
+ }
+}
+
+func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+ layersCache, err := getLayersCache(store)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chunkedDiffer{
+ fsVerityDigests: make(map[string]string),
+ blobDigest: blobDigest,
+ blobSize: blobSize,
+ uncompressedTarSize: -1, // Will be computed later
+ convertToZstdChunked: true,
+ copyBuffer: makeCopyBuffer(),
+ layersCache: layersCache,
+ pullOptions: pullOptions,
+ stream: iss,
+ }, nil
+}
+
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+ manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
+ if err != nil {
+ return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
+ }
+ var uncompressedTarSize int64 = -1
+ if tarSplit != nil {
+ uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
+ if err != nil {
+ return nil, fmt.Errorf("computing size from tar-split: %w", err)
+ }
+ }
+
+ layersCache, err := getLayersCache(store)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chunkedDiffer{
+ fsVerityDigests: make(map[string]string),
+ blobSize: blobSize,
+ uncompressedTarSize: uncompressedTarSize,
+ tocDigest: tocDigest,
+ copyBuffer: makeCopyBuffer(),
+ fileType: fileTypeZstdChunked,
+ layersCache: layersCache,
+ manifest: manifest,
+ toc: toc,
+ pullOptions: pullOptions,
+ stream: iss,
+ tarSplit: tarSplit,
+ tocOffset: tocOffset,
+ }, nil
+}
+
+func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+ manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
+ if err != nil {
+ return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
+ }
+ layersCache, err := getLayersCache(store)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chunkedDiffer{
+ fsVerityDigests: make(map[string]string),
+ blobSize: blobSize,
+ uncompressedTarSize: -1, // We would have to read and decompress the whole layer
+ tocDigest: tocDigest,
+ copyBuffer: makeCopyBuffer(),
+ fileType: fileTypeEstargz,
+ layersCache: layersCache,
+ manifest: manifest,
+ pullOptions: pullOptions,
+ stream: iss,
+ tocOffset: tocOffset,
+ }, nil
+}
+
+func makeCopyBuffer() []byte {
+ return make([]byte, 2<<20)
+}
+
+// copyFileFromOtherLayer copies a file from another layer
+// file is the file to look for.
+// source is the path to the source layer checkout.
+// name is the path to the file to copy in source.
+// dirfd is an open file descriptor to the destination root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ srcDirfd, err := unix.Open(source, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return false, nil, 0, &fs.PathError{Op: "open", Path: source, Err: err}
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(srcDirfd, name, unix.O_RDONLY|syscall.O_CLOEXEC, 0)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ defer srcFile.Close()
+
+ dstFile, written, err := copyFileContent(int(srcFile.Fd()), file, dirfd, 0, useHardLinks)
+ if err != nil {
+ return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
+ }
+ return true, dstFile, written, nil
+}
+
+// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
+// It checks that the two files have the same UID, GID, file mode and xattrs.
+func canDedupMetadataWithHardLink(file *fileMetadata, otherFile *fileMetadata) bool {
+ if file.UID != otherFile.UID {
+ return false
+ }
+ if file.GID != otherFile.GID {
+ return false
+ }
+ if file.Mode != otherFile.Mode {
+ return false
+ }
+ if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) {
+ return false
+ }
+ return true
+}
+
+// canDedupFileWithHardLink checks if the specified file can be deduplicated by an
+// open file, given its descriptor and stat data.
+func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
+ st, ok := s.Sys().(*syscall.Stat_t)
+ if !ok {
+ return false
+ }
+
+ path := procPathForFd(fd)
+
+ listXattrs, err := system.Llistxattr(path)
+ if err != nil {
+ return false
+ }
+
+ xattrs := make(map[string]string)
+ for _, x := range listXattrs {
+ v, err := system.Lgetxattr(path, x)
+ if err != nil {
+ return false
+ }
+
+ if _, found := xattrsToIgnore[x]; found {
+ continue
+ }
+ xattrs[x] = string(v)
+ }
+ // fill only the attributes used by canDedupMetadataWithHardLink.
+ otherFile := fileMetadata{
+ FileMetadata: internal.FileMetadata{
+ UID: int(st.Uid),
+ GID: int(st.Gid),
+ Mode: int64(st.Mode),
+ Xattrs: xattrs,
+ },
+ }
+ return canDedupMetadataWithHardLink(file, &otherFile)
+}
+
+// findFileInOSTreeRepos checks whether the requested file already exists in one of the OSTree repos and copies the file content from there if possible.
+// file is the file to look for.
+// ostreeRepos is a list of OSTree repos.
+// dirfd is an open fd to the destination checkout.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ digest, err := digest.Parse(file.Digest)
+ if err != nil {
+ logrus.Debugf("could not parse digest: %v", err)
+ return false, nil, 0, nil
+ }
+ payloadLink := digest.Encoded() + ".payload-link"
+ if len(payloadLink) < 2 {
+ return false, nil, 0, nil
+ }
+
+ for _, repo := range ostreeRepos {
+ sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
+ st, err := os.Stat(sourceFile)
+ if err != nil || !st.Mode().IsRegular() {
+ continue
+ }
+ if st.Size() != file.Size {
+ continue
+ }
+ fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK|unix.O_CLOEXEC, 0)
+ if err != nil {
+ logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err)
+ return false, nil, 0, nil
+ }
+ f := os.NewFile(uintptr(fd), "fd")
+ defer f.Close()
+
+ // check if the open file can be deduplicated with hard links
+ if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
+ continue
+ }
+
+ dstFile, written, err := copyFileContent(fd, file, dirfd, 0, useHardLinks)
+ if err != nil {
+ logrus.Debugf("could not copyFileContent: %v", err)
+ return false, nil, 0, nil
+ }
+ return true, dstFile, written, nil
+ }
+ // If hard-link deduplication was attempted and failed, try again without hard links.
+ if useHardLinks {
+ return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
+ }
+
+ return false, nil, 0, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOtherLayers(cache *layersCache, file *fileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ target, name, err := cache.findFileInOtherLayers(file, useHardLinks)
+ if err != nil || name == "" {
+ return false, nil, 0, err
+ }
+ return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks)
+}
+
+func maybeDoIDRemap(manifest []fileMetadata, options *archive.TarOptions) error {
+ if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
+ return nil
+ }
+
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ for i := range manifest {
+ if options.ChownOpts != nil {
+ manifest[i].UID = options.ChownOpts.UID
+ manifest[i].GID = options.ChownOpts.GID
+ } else {
+ pair := idtools.IDPair{
+ UID: manifest[i].UID,
+ GID: manifest[i].GID,
+ }
+ var err error
+ manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
+ var out []uint32
+ for value := range inputMap {
+ out = append(out, value)
+ }
+ return out
+}
+
+func collectIDs(entries []fileMetadata) ([]uint32, []uint32) {
+ uids := make(map[uint32]struct{})
+ gids := make(map[uint32]struct{})
+ for _, entry := range entries {
+ uids[uint32(entry.UID)] = struct{}{}
+ gids[uint32(entry.GID)] = struct{}{}
+ }
+ return mapToSlice(uids), mapToSlice(gids)
+}
+
+type originFile struct {
+ Root string
+ Path string
+ Offset int64
+}
+
+type missingFileChunk struct {
+ Gap int64
+ Hole bool
+
+ File *fileMetadata
+
+ CompressedSize int64
+ UncompressedSize int64
+}
+
+type missingPart struct {
+ Hole bool
+ SourceChunk *ImageSourceChunk
+ OriginFile *originFile
+ Chunks []missingFileChunk
+}
+
+func (o *originFile) OpenFile() (io.ReadCloser, error) {
+ srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, &fs.PathError{Op: "open", Path: o.Root, Err: err}
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(srcDirfd, o.Path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := srcFile.Seek(o.Offset, 0); err != nil {
+ srcFile.Close()
+ return nil, err
+ }
+ return srcFile, nil
+}
+
+func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
+ switch {
+ case partCompression == fileTypeHole:
+ // The entire part is a hole. There is no need to read from a file.
+ c.rawReader = nil
+ return fileTypeHole, nil
+ case mf.Hole:
+ // Only the missing chunk in the requested part refers to a hole.
+ // The received data must be discarded.
+ limitReader := io.LimitReader(from, mf.CompressedSize)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
+ return fileTypeHole, err
+ case partCompression == fileTypeZstdChunked:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.zstdReader == nil {
+ r, err := zstd.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.zstdReader = r
+ } else {
+ if err := c.zstdReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeEstargz:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.gzipReader == nil {
+ r, err := pgzip.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.gzipReader = r
+ } else {
+ if err := c.gzipReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeNoCompression:
+ c.rawReader = io.LimitReader(from, mf.UncompressedSize)
+ default:
+ return partCompression, fmt.Errorf("unknown file type %q", c.fileType)
+ }
+ return partCompression, nil
+}
+
+// hashHole writes SIZE zeros to the specified hasher
+func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
+ count := int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ for i := int64(0); i < count; i++ {
+ copyBuffer[i] = 0
+ }
+ for size > 0 {
+ count = int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ if _, err := h.Write(copyBuffer[:count]); err != nil {
+ return err
+ }
+ size -= count
+ }
+ return nil
+}
+
+func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
+ switch compression {
+ case fileTypeZstdChunked:
+ defer func() {
+ if err := c.zstdReader.Reset(nil); err != nil {
+ logrus.Warnf("release of references to the previous zstd reader failed: %v", err)
+ }
+ }()
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeEstargz:
+ defer c.gzipReader.Close()
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeNoCompression:
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeHole:
+ if err := appendHole(int(destFile.file.Fd()), destFile.metadata.Name, size); err != nil {
+ return err
+ }
+ if destFile.hash != nil {
+ if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("unknown file type %q", c.fileType)
+ }
+ return nil
+}
+
+type recordFsVerityFunc func(string, *os.File) error
+
+type destinationFile struct {
+ digester digest.Digester
+ dirfd int
+ file *os.File
+ hash hash.Hash
+ metadata *fileMetadata
+ options *archive.TarOptions
+ skipValidation bool
+ to io.Writer
+ recordFsVerity recordFsVerityFunc
+}
+
+func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
+ file, err := openFileUnderRoot(dirfd, metadata.Name, newFileFlags, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ var digester digest.Digester
+ var hash hash.Hash
+ var to io.Writer
+
+ if skipValidation {
+ to = file
+ } else {
+ digester = digest.Canonical.Digester()
+ hash = digester.Hash()
+ to = io.MultiWriter(file, hash)
+ }
+
+ return &destinationFile{
+ file: file,
+ digester: digester,
+ hash: hash,
+ to: to,
+ metadata: metadata,
+ options: options,
+ dirfd: dirfd,
+ skipValidation: skipValidation,
+ recordFsVerity: recordFsVerity,
+ }, nil
+}
+
+func (d *destinationFile) Close() (Err error) {
+ defer func() {
+ var roFile *os.File
+ var err error
+
+ if d.recordFsVerity != nil {
+ roFile, err = reopenFileReadOnly(d.file)
+ if err == nil {
+ defer roFile.Close()
+ } else if Err == nil {
+ Err = err
+ }
+ }
+
+ err = d.file.Close()
+ if Err == nil {
+ Err = err
+ }
+
+ if Err == nil && roFile != nil {
+ Err = d.recordFsVerity(d.metadata.Name, roFile)
+ }
+ }()
+
+ if !d.skipValidation {
+ manifestChecksum, err := digest.Parse(d.metadata.Digest)
+ if err != nil {
+ return err
+ }
+ if d.digester.Digest() != manifestChecksum {
+ return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
+ }
+ }
+
+ return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
+}
+
+func closeDestinationFiles(files chan *destinationFile, errors chan error) {
+ for f := range files {
+ errors <- f.Close()
+ }
+ close(errors)
+}
+
+func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
+ if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
+ return nil
+ }
+ // fsverity.EnableVerity doesn't return an error if fs-verity was already
+ // enabled on the file.
+ err := fsverity.EnableVerity(path, int(roFile.Fd()))
+ if err != nil {
+ if c.useFsVerity == graphdriver.DifferFsVerityRequired {
+ return err
+ }
+
+ // If it is not required, ignore the error if the filesystem does not support it.
+ if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) {
+ return nil
+ }
+ }
+ verity, err := fsverity.MeasureVerity(path, int(roFile.Fd()))
+ if err != nil {
+ return err
+ }
+
+ c.fsVerityMutex.Lock()
+ c.fsVerityDigests[path] = verity
+ c.fsVerityMutex.Unlock()
+
+ return nil
+}
+
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+ var destFile *destinationFile
+
+ filesToClose := make(chan *destinationFile, 3)
+ closeFilesErrors := make(chan error, 2)
+
+ go closeDestinationFiles(filesToClose, closeFilesErrors)
+ defer func() {
+ close(filesToClose)
+ for e := range closeFilesErrors {
+ if e != nil && Err == nil {
+ Err = e
+ }
+ }
+ }()
+
+ for _, missingPart := range missingParts {
+ var part io.ReadCloser
+ partCompression := c.fileType
+ switch {
+ case missingPart.Hole:
+ partCompression = fileTypeHole
+ case missingPart.OriginFile != nil:
+ var err error
+ part, err = missingPart.OriginFile.OpenFile()
+ if err != nil {
+ return err
+ }
+ partCompression = fileTypeNoCompression
+ case missingPart.SourceChunk != nil:
+ select {
+ case p := <-streams:
+ part = p
+ case err := <-errs:
+ if err == nil {
+ return errors.New("not enough data returned from the server")
+ }
+ return err
+ }
+ if part == nil {
+ return errors.New("invalid stream returned")
+ }
+ default:
+ return errors.New("internal error: missing part misses both local and remote data stream")
+ }
+
+ for _, mf := range missingPart.Chunks {
+ if mf.Gap > 0 {
+ limitReader := io.LimitReader(part, mf.Gap)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ continue
+ }
+
+ if mf.File.Name == "" {
+ Err = errors.New("file name empty")
+ goto exit
+ }
+
+ compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+
+ // Open the new file if it is different from what is already
+ // opened
+ if destFile == nil || destFile.metadata.Name != mf.File.Name {
+ var err error
+ if destFile != nil {
+ cleanup:
+ for {
+ select {
+ case err = <-closeFilesErrors:
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ default:
+ break cleanup
+ }
+ }
+ filesToClose <- destFile
+ }
+ recordFsVerity := c.recordFsVerity
+ if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
+ recordFsVerity = nil
+ }
+ destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ }
+
+ if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+ Err = err
+ goto exit
+ }
+ if c.rawReader != nil {
+ if _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer); err != nil {
+ Err = err
+ goto exit
+ }
+ }
+ }
+ exit:
+ if part != nil {
+ part.Close()
+ if Err != nil {
+ break
+ }
+ }
+ }
+
+ if destFile != nil {
+ return destFile.Close()
+ }
+
+ return nil
+}
+
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+ getGap := func(missingParts []missingPart, i int) uint64 {
+ prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+ return missingParts[i].SourceChunk.Offset - prev
+ }
+
+ // simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later.
+ newMissingParts := missingParts[0:1]
+ prevIndex := 0
+ for i := 1; i < len(missingParts); i++ {
+ gap := getGap(missingParts, i)
+ if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
+ missingParts[i].OriginFile == nil &&
+ !missingParts[prevIndex].Hole && !missingParts[i].Hole &&
+ len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
+ missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
+ missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
+ missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
+ } else {
+ newMissingParts = append(newMissingParts, missingParts[i])
+ prevIndex++
+ }
+ }
+ missingParts = newMissingParts
+
+ type gap struct {
+ from int
+ to int
+ cost uint64
+ }
+ var requestGaps []gap
+ lastOffset := int(-1)
+ numberSourceChunks := 0
+ for i, c := range missingParts {
+ if c.OriginFile != nil || c.Hole {
+ // it does not require a network request
+ continue
+ }
+ numberSourceChunks++
+ if lastOffset >= 0 {
+ prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length
+ cost := c.SourceChunk.Offset - prevEnd
+ g := gap{
+ from: lastOffset,
+ to: i,
+ cost: cost,
+ }
+ requestGaps = append(requestGaps, g)
+ }
+ lastOffset = i
+ }
+ sort.Slice(requestGaps, func(i, j int) bool {
+ return requestGaps[i].cost < requestGaps[j].cost
+ })
+ toMergeMap := make([]bool, len(missingParts))
+ remainingToMerge := numberSourceChunks - target
+ for _, g := range requestGaps {
+ if remainingToMerge < 0 && g.cost > autoMergePartsThreshold {
+ continue
+ }
+ for i := g.from + 1; i <= g.to; i++ {
+ toMergeMap[i] = true
+ }
+ remainingToMerge--
+ }
+
+ newMissingParts = missingParts[0:1]
+ for i := 1; i < len(missingParts); i++ {
+ if !toMergeMap[i] {
+ newMissingParts = append(newMissingParts, missingParts[i])
+ } else {
+ gap := getGap(missingParts, i)
+ prev := &newMissingParts[len(newMissingParts)-1]
+ prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ prev.Hole = false
+ prev.OriginFile = nil
+ if gap > 0 {
+ gapFile := missingFileChunk{
+ Gap: int64(gap),
+ }
+ prev.Chunks = append(prev.Chunks, gapFile)
+ }
+ prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
+ }
+ }
+ return newMissingParts
+}
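+
+// Illustrative example (made-up offsets): two remote parts covering bytes
+// [0,100) and [110,160) leave a 10-byte gap, well below
+// autoMergePartsThreshold, so they can be merged into one request for
+// [0,160); the gap becomes a Gap chunk whose bytes storeMissingFiles reads
+// and discards.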
+
+func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
+ var chunksToRequest []ImageSourceChunk
+
+ calculateChunksToRequest := func() {
+ chunksToRequest = []ImageSourceChunk{}
+ for _, c := range missingParts {
+ if c.OriginFile == nil && !c.Hole {
+ chunksToRequest = append(chunksToRequest, *c.SourceChunk)
+ }
+ }
+ }
+
+ missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
+ calculateChunksToRequest()
+
+ // There are some missing files. Prepare a multirange request for the missing chunks.
+ var streams chan io.ReadCloser
+ var err error
+ var errs chan error
+ for {
+ streams, errs, err = stream.GetBlobAt(chunksToRequest)
+ if err == nil {
+ break
+ }
+
+ if _, ok := err.(ErrBadRequest); ok {
+ if len(chunksToRequest) == 1 {
+ return err
+ }
+ // Merge more chunks to request
+ missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
+ calculateChunksToRequest()
+ continue
+ }
+ return err
+ }
+
+ if err := c.storeMissingFiles(streams, errs, dirfd, missingParts, options); err != nil {
+ return err
+ }
+ return nil
+}
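+
+// Note: when the server rejects the multirange request with ErrBadRequest,
+// the loop above halves the number of requested ranges by re-merging parts
+// (e.g. 1024 -> 512 -> ... -> 1), trading extra downloaded bytes for a
+// request the server accepts.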
+
+type hardLinkToCreate struct {
+ dest string
+ dirfd int
+ mode os.FileMode
+ metadata *fileMetadata
+}
+
+func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
+ if value, ok := pullOptions[name]; ok {
+ return strings.ToLower(value) == "true"
+ }
+ return def
+}
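+
+// For example (hypothetical input), pullOptions["use_hard_links"] = "True"
+// parses as true, and a missing key yields the provided default.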
+
+type findAndCopyFileOptions struct {
+ useHardLinks bool
+ ostreeRepos []string
+ options *archive.TarOptions
+}
+
+func reopenFileReadOnly(f *os.File) (*os.File, error) {
+ path := procPathForFile(f)
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, &fs.PathError{Op: "open", Path: path, Err: err}
+ }
+ return os.NewFile(uintptr(fd), f.Name()), nil
+}
+
+func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
+ finalizeFile := func(dstFile *os.File) error {
+ if dstFile == nil {
+ return nil
+ }
+ err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false)
+ if err != nil {
+ dstFile.Close()
+ return err
+ }
+ var roFile *os.File
+ if c.useFsVerity != graphdriver.DifferFsVerityDisabled {
+ roFile, err = reopenFileReadOnly(dstFile)
+ }
+ dstFile.Close()
+ if err != nil {
+ return err
+ }
+ if roFile == nil {
+ return nil
+ }
+
+ defer roFile.Close()
+ return c.recordFsVerity(r.Name, roFile)
+ }
+
+ found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
+ var new []fileMetadata
+
+ hashes := make(map[string]string)
+ for i := range mergedEntries {
+ if mergedEntries[i].Type != TypeReg {
+ continue
+ }
+ if mergedEntries[i].Digest == "" {
+ return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name)
+ }
+ digest, err := digest.Parse(mergedEntries[i].Digest)
+ if err != nil {
+ return nil, err
+ }
+ d := digest.Encoded()
+
+ if hashes[d] != "" {
+ continue
+ }
+ hashes[d] = d
+
+ mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
+ mergedEntries[i].skipSetAttrs = true
+
+ new = append(new, mergedEntries[i])
+ }
+ return new, nil
+}
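+
+// Illustrative example (truncated digest): a regular file with digest
+// sha256:abc123... is renamed to "ab/c123..." in the flat layout, and any
+// later entry with the same digest is dropped as a duplicate.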
+
+func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
+ var payload io.ReadCloser
+ var streams chan io.ReadCloser
+ var errs chan error
+ var err error
+
+ chunksToRequest := []ImageSourceChunk{
+ {
+ Offset: 0,
+ Length: uint64(c.blobSize),
+ },
+ }
+
+ streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
+ if err != nil {
+ return "", err
+ }
+ select {
+ case p := <-streams:
+ payload = p
+ case err := <-errs:
+ return "", err
+ }
+ if payload == nil {
+ return "", errors.New("invalid stream returned")
+ }
+ defer payload.Close()
+
+ originalRawDigester := digest.Canonical.Digester()
+
+ r := io.TeeReader(payload, originalRawDigester.Hash())
+
+ // copy the entire tarball and compute its digest
+ _, err = io.CopyBuffer(destination, r, c.copyBuffer)
+
+ return originalRawDigester.Digest(), err
+}
+
+func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
+ defer c.layersCache.release()
+ defer func() {
+ if c.zstdReader != nil {
+ c.zstdReader.Close()
+ }
+ }()
+
+ c.useFsVerity = differOpts.UseFsVerity
+
+ // stream to use for reading the zstd:chunked or Estargz file.
+ stream := c.stream
+
+ var compressedDigest digest.Digest
+ var uncompressedDigest digest.Digest
+
+ if c.convertToZstdChunked {
+ fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, &fs.PathError{Op: "open", Path: dest, Err: err}
+ }
+ blobFile := os.NewFile(uintptr(fd), "blob-file")
+ defer func() {
+ if blobFile != nil {
+ blobFile.Close()
+ }
+ }()
+
+ // calculate the checksum before accessing the file.
+ compressedDigest, err = c.copyAllBlobToFile(blobFile)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+
+ if compressedDigest != c.blobDigest {
+ return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest)
+ }
+
+ if _, err := blobFile.Seek(0, io.SeekStart); err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+
+ tarSize, fileSource, diffID, annotations, err := c.convertTarToZstdChunked(dest, blobFile)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ c.uncompressedTarSize = tarSize
+ // fileSource is an O_TMPFILE file descriptor, so we
+ // need to keep it open until the entire file is processed.
+ defer fileSource.Close()
+
+ // Close the file so that the file descriptor is released and the file is deleted.
+ blobFile.Close()
+ blobFile = nil
+
+ tocDigest, err := toc.GetTOCDigest(annotations)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: parsing just-created zstd:chunked TOC digest: %w", err)
+ }
+ if tocDigest == nil {
+ return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
+ }
+ manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
+ }
+
+ // Use the new file for accessing the zstd:chunked file.
+ stream = fileSource
+
+ // fill the chunkedDiffer with the data we just read.
+ c.fileType = fileTypeZstdChunked
+ c.manifest = manifest
+ c.toc = toc
+ c.tarSplit = tarSplit
+ c.tocOffset = tocOffset
+
+ // the file was generated by us and the digest for each file was already computed, no need to validate it again.
+ c.skipValidation = true
+ // since we retrieved the whole file and it was validated, set the uncompressed digest.
+ uncompressedDigest = diffID
+ }
+
+ lcd := chunkedLayerData{
+ Format: differOpts.Format,
+ }
+
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ lcdBigData, err := json.Marshal(lcd)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+
+ // Generate the manifest
+ toc := c.toc
+ if toc == nil {
+ toc_, err := unmarshalToc(c.manifest)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ toc = toc_
+ }
+
+ output := graphdriver.DriverWithDifferOutput{
+ Differ: c,
+ TarSplit: c.tarSplit,
+ BigData: map[string][]byte{
+ bigDataKey: c.manifest,
+ chunkedLayerDataKey: lcdBigData,
+ },
+ Artifacts: map[string]interface{}{
+ tocKey: toc,
+ },
+ TOCDigest: c.tocDigest,
+ UncompressedDigest: uncompressedDigest,
+ CompressedDigest: compressedDigest,
+ Size: c.uncompressedTarSize,
+ }
+
+ // When the hard links deduplication is used, file attributes are ignored because setting them
+ // modifies the source file as well.
+ useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
+
+ // List of OSTree repositories to use for deduplication
+ ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
+
+ whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+
+ var missingParts []missingPart
+
+ mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
+ if err != nil {
+ return output, err
+ }
+
+ output.UIDs, output.GIDs = collectIDs(mergedEntries)
+
+ if err := maybeDoIDRemap(mergedEntries, options); err != nil {
+ return output, err
+ }
+
+ if options.ForceMask != nil {
+ uid, gid, mode, err := archive.GetFileOwner(dest)
+ if err == nil {
+ value := idtools.Stat{
+ IDs: idtools.IDPair{UID: int(uid), GID: int(gid)},
+ Mode: os.FileMode(mode),
+ }
+ if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
+ return output, err
+ }
+ }
+ }
+
+ dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return output, &fs.PathError{Op: "open", Path: dest, Err: err}
+ }
+ defer unix.Close(dirfd)
+
+ if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
+ mergedEntries, err = makeEntriesFlat(mergedEntries)
+ if err != nil {
+ return output, err
+ }
+ createdDirs := make(map[string]struct{})
+ for _, e := range mergedEntries {
+ d := e.Name[0:2]
+ if _, found := createdDirs[d]; !found {
+ if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
+ return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
+ }
+ createdDirs[d] = struct{}{}
+ }
+ }
+ }
+
+ // Hard links can point to files that have not been retrieved yet, so create
+ // them only after all files are retrieved.
+ var hardLinks []hardLinkToCreate
+
+ missingPartsSize, totalChunksSize := int64(0), int64(0)
+
+ copyOptions := findAndCopyFileOptions{
+ useHardLinks: useHardLinks,
+ ostreeRepos: ostreeRepos,
+ options: options,
+ }
+
+ type copyFileJob struct {
+ njob int
+ index int
+ mode os.FileMode
+ metadata *fileMetadata
+
+ found bool
+ err error
+ }
+
+ var wg sync.WaitGroup
+
+ copyResults := make([]copyFileJob, len(mergedEntries))
+
+ copyFileJobs := make(chan copyFileJob)
+ defer func() {
+ if copyFileJobs != nil {
+ close(copyFileJobs)
+ }
+ wg.Wait()
+ }()
+
+ for range copyGoRoutines {
+ wg.Add(1)
+ jobs := copyFileJobs
+
+ go func() {
+ defer wg.Done()
+ for job := range jobs {
+ found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode)
+ job.err = err
+ job.found = found
+ copyResults[job.njob] = job
+ }
+ }()
+ }
+
+ filesToWaitFor := 0
+ for i := range mergedEntries {
+ r := &mergedEntries[i]
+ if options.ForceMask != nil {
+ value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
+ if r.Xattrs == nil {
+ r.Xattrs = make(map[string]string)
+ }
+ r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
+ }
+
+ mode := os.FileMode(r.Mode)
+
+ t, err := typeToTarType(r.Type)
+ if err != nil {
+ return output, err
+ }
+
+ r.Name = filepath.Clean(r.Name)
+ // do not clean the link target of symlinks
+ if r.Linkname != "" && t != tar.TypeSymlink {
+ r.Linkname = filepath.Clean(r.Linkname)
+ }
+
+ if whiteoutConverter != nil {
+ hdr := archivetar.Header{
+ Typeflag: t,
+ Name: r.Name,
+ Linkname: r.Linkname,
+ Size: r.Size,
+ Mode: r.Mode,
+ Uid: r.UID,
+ Gid: r.GID,
+ }
+ handler := whiteoutHandler{
+ Dirfd: dirfd,
+ Root: dest,
+ }
+ writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler)
+ if err != nil {
+ return output, err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+ switch t {
+ case tar.TypeReg:
+ // Create empty files directly.
+ if r.Size == 0 {
+ // Use a closure so that the deferred Close runs before continuing the loop.
+ createEmptyFile := func() error {
+ file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ if err := setFileAttrs(dirfd, file, mode, r, options, false); err != nil {
+ return err
+ }
+ return nil
+ }
+ if err := createEmptyFile(); err != nil {
+ return output, err
+ }
+ continue
+ }
+
+ case tar.TypeDir:
+ if r.Name == "" || r.Name == "." {
+ output.RootDirMode = &mode
+ }
+ if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
+ return output, err
+ }
+ continue
+
+ case tar.TypeLink:
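+			// Defer hard-link creation until all regular files exist; keep
+			// copies of the values needed to create the link later.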
+ dest := dest
+ dirfd := dirfd
+ mode := mode
+ r := r
+ hardLinks = append(hardLinks, hardLinkToCreate{
+ dest: dest,
+ dirfd: dirfd,
+ mode: mode,
+ metadata: r,
+ })
+ continue
+
+ case tar.TypeSymlink:
+ if err := safeSymlink(dirfd, r); err != nil {
+ return output, err
+ }
+ continue
+
+		case tar.TypeChar, tar.TypeBlock, tar.TypeFifo:
+			/* Ignore. */
+ default:
+ return output, fmt.Errorf("invalid type %q", t)
+ }
+
+ totalChunksSize += r.Size
+
+ if t == tar.TypeReg {
+ index := i
+ njob := filesToWaitFor
+ job := copyFileJob{
+ mode: mode,
+ metadata: &mergedEntries[index],
+ index: index,
+ njob: njob,
+ }
+ copyFileJobs <- job
+ filesToWaitFor++
+ }
+ }
+
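+	// All jobs are submitted: close the channel so the workers terminate, and
+	// reset it to nil so the deferred cleanup does not close it twice.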
+ close(copyFileJobs)
+ copyFileJobs = nil
+
+ wg.Wait()
+
+ for _, res := range copyResults[:filesToWaitFor] {
+ r := &mergedEntries[res.index]
+
+ if res.err != nil {
+ return output, res.err
+ }
+ // the file was already copied to its destination
+ // so nothing left to do.
+ if res.found {
+ continue
+ }
+
+ missingPartsSize += r.Size
+
+ remainingSize := r.Size
+
+ // the file is missing, attempt to find individual chunks.
+ for _, chunk := range r.chunks {
+ compressedSize := int64(chunk.EndOffset - chunk.Offset)
+ size := remainingSize
+ if chunk.ChunkSize > 0 {
+ size = chunk.ChunkSize
+ }
+ remainingSize = remainingSize - size
+
+ rawChunk := ImageSourceChunk{
+ Offset: uint64(chunk.Offset),
+ Length: uint64(compressedSize),
+ }
+ file := missingFileChunk{
+ File: &mergedEntries[res.index],
+ CompressedSize: compressedSize,
+ UncompressedSize: size,
+ }
+ mp := missingPart{
+ SourceChunk: &rawChunk,
+ Chunks: []missingFileChunk{
+ file,
+ },
+ }
+
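+			// A data chunk may be deduplicated from another local layer when
+			// its checksum matches; a zeros chunk becomes a hole and is never
+			// fetched.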
+ switch chunk.ChunkType {
+ case internal.ChunkTypeData:
+ root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
+ if err != nil {
+ return output, err
+ }
+ if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) {
+ missingPartsSize -= size
+ mp.OriginFile = &originFile{
+ Root: root,
+ Path: path,
+ Offset: offset,
+ }
+ }
+ case internal.ChunkTypeZeros:
+ missingPartsSize -= size
+ mp.Hole = true
+ // Mark all chunks belonging to the missing part as holes
+ for i := range mp.Chunks {
+ mp.Chunks[i].Hole = true
+ }
+ }
+ missingParts = append(missingParts, mp)
+ }
+ }
+ // There are some missing files. Prepare a multirange request for the missing chunks.
+ if len(missingParts) > 0 {
+ if err := c.retrieveMissingFiles(stream, dirfd, missingParts, options); err != nil {
+ return output, err
+ }
+ }
+
+ for _, m := range hardLinks {
+ if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil {
+ return output, err
+ }
+ }
+
+ if totalChunksSize > 0 {
+ logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
+ }
+
+ output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests
+
+ return output, nil
+}
+
+func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+ // ignore the metadata files for the estargz format.
+ if fileType != fileTypeEstargz {
+ return false
+ }
+ switch e.Name {
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+ return true
+ }
+ return false
+}
+
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
+ countNextChunks := func(start int) int {
+ count := 0
+ for _, e := range entries[start:] {
+ if e.Type != TypeChunk {
+ return count
+ }
+ count++
+ }
+ return count
+ }
+
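+	// Count only the entries that start a file: TypeChunk entries are folded
+	// into the preceding regular file below.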
+ size := 0
+ for _, entry := range entries {
+ if mustSkipFile(fileType, entry) {
+ continue
+ }
+ if entry.Type != TypeChunk {
+ size++
+ }
+ }
+
+ mergedEntries := make([]fileMetadata, size)
+ m := 0
+ for i := 0; i < len(entries); i++ {
+ e := fileMetadata{FileMetadata: entries[i]}
+ if mustSkipFile(fileType, entries[i]) {
+ continue
+ }
+
+ if e.Type == TypeChunk {
+ return nil, fmt.Errorf("chunk type without a regular file")
+ }
+
+ if e.Type == TypeReg {
+ nChunks := countNextChunks(i + 1)
+
+ e.chunks = make([]*internal.FileMetadata, nChunks+1)
+ for j := 0; j <= nChunks; j++ {
+				// We need a copy here, otherwise we would overwrite the
+				// .Size field later.
+ copy := entries[i+j]
+ e.chunks[j] = ©
+ e.EndOffset = entries[i+j].EndOffset
+ }
+ i += nChunks
+ }
+ mergedEntries[m] = e
+ m++
+ }
+	// stargz/estargz does not store EndOffset, so compute it here by walking
+	// the entries backwards from the TOC offset.
+ lastOffset := c.tocOffset
+ for i := len(mergedEntries) - 1; i >= 0; i-- {
+ if mergedEntries[i].EndOffset == 0 {
+ mergedEntries[i].EndOffset = lastOffset
+ }
+ if mergedEntries[i].Offset != 0 {
+ lastOffset = mergedEntries[i].Offset
+ }
+
+ lastChunkOffset := mergedEntries[i].EndOffset
+ for j := len(mergedEntries[i].chunks) - 1; j >= 0; j-- {
+ mergedEntries[i].chunks[j].EndOffset = lastChunkOffset
+ mergedEntries[i].chunks[j].Size = mergedEntries[i].chunks[j].EndOffset - mergedEntries[i].chunks[j].Offset
+ lastChunkOffset = mergedEntries[i].chunks[j].Offset
+ }
+ }
+ return mergedEntries, nil
+}
+
+// validateChunkChecksum checks whether the chunk.ChunkSize bytes at offset in
+// the file at $root/$path have the same digest as chunk.ChunkDigest.
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+ parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return false
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err := openFileUnderRoot(parentDirfd, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return false
+ }
+ defer fd.Close()
+
+ if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+ return false
+ }
+
+ r := io.LimitReader(fd, chunk.ChunkSize)
+ digester := digest.Canonical.Digester()
+
+ if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+ return false
+ }
+
+ digest, err := digest.Parse(chunk.ChunkDigest)
+ if err != nil {
+ return false
+ }
+
+ return digester.Digest() == digest
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
new file mode 100644
index 000000000..7be0adeb8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -0,0 +1,17 @@
+//go:build !linux
+
+package chunked
+
+import (
+ "context"
+ "errors"
+
+ storage "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+ return nil, errors.New("format not supported on this system")
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
new file mode 100644
index 000000000..6fbaa41b5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -0,0 +1,41 @@
+package toc
+
+import (
+ "errors"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
+// TOC JSON.
+// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
+// Duplicate it here to avoid a dependency on the package.
+const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+// GetTOCDigest returns the digest of the table of contents (TOC) as recorded
+// in the image's annotations, or nil if no TOC annotation is present.
+// This is an experimental feature and may be changed/removed in the future.
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
+ d1, ok1 := annotations[tocJSONDigestAnnotation]
+ d2, ok2 := annotations[internal.ManifestChecksumKey]
+ switch {
+ case ok1 && ok2:
+ return nil, errors.New("both zstd:chunked and eStargz TOC found")
+ case ok1:
+ d, err := digest.Parse(d1)
+ if err != nil {
+ return nil, err
+ }
+ return &d, nil
+ case ok2:
+ d, err := digest.Parse(d2)
+ if err != nil {
+ return nil, err
+ }
+ return &d, nil
+ default:
+ return nil, nil
+ }
+}
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index b9e2658a7..560df3cf5 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -5,114 +5,52 @@ import (
"os"
)
-// ThinpoolOptionsConfig represents the "storage.options.thinpool"
-// TOML config table.
-type ThinpoolOptionsConfig struct {
- // AutoExtendPercent determines the amount by which pool needs to be
- // grown. This is specified in terms of % of pool size. So a value of
- // 20 means that when threshold is hit, pool will be grown by 20% of
- // existing pool size.
- AutoExtendPercent string `toml:"autoextend_percent"`
-
- // AutoExtendThreshold determines the pool extension threshold in terms
- // of percentage of pool size. For example, if threshold is 60, that
- // means when pool is 60% full, threshold has been hit.
- AutoExtendThreshold string `toml:"autoextend_threshold"`
-
- // BaseSize specifies the size to use when creating the base device,
- // which limits the size of images and containers.
- BaseSize string `toml:"basesize"`
-
- // BlockSize specifies a custom blocksize to use for the thin pool.
- BlockSize string `toml:"blocksize"`
-
- // DirectLvmDevice specifies a custom block storage device to use for
- // the thin pool.
- DirectLvmDevice string `toml:"directlvm_device"`
-
- // DirectLvmDeviceForcewipes device even if device already has a
- // filesystem
- DirectLvmDeviceForce string `toml:"directlvm_device_force"`
-
- // Fs specifies the filesystem type to use for the base device.
- Fs string `toml:"fs"`
-
- // log_level sets the log level of devicemapper.
- LogLevel string `toml:"log_level"`
-
- // MetadataSize specifies the size of the metadata for the thinpool
- // It will be used with the `pvcreate --metadata` option.
- MetadataSize string `toml:"metadatasize"`
-
- // MinFreeSpace specifies the min free space percent in a thin pool
- // require for new device creation to
- MinFreeSpace string `toml:"min_free_space"`
-
- // MkfsArg specifies extra mkfs arguments to be used when creating the
- // basedevice.
- MkfsArg string `toml:"mkfsarg"`
-
- // MountOpt specifies extra mount options used when mounting the thin
- // devices.
- MountOpt string `toml:"mountopt"`
-
- // Size
- Size string `toml:"size"`
-
- // UseDeferredDeletion marks device for deferred deletion
- UseDeferredDeletion string `toml:"use_deferred_deletion"`
-
- // UseDeferredRemoval marks device for deferred removal
- UseDeferredRemoval string `toml:"use_deferred_removal"`
-
- // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
- // retries XFS should attempt to complete IO when ENOSPC (no space)
- // error is returned by underlying storage device.
- XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
-}
-
type AufsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
}
type BtrfsOptionsConfig struct {
// MinSpace is the minimal spaces allocated to the device
- MinSpace string `toml:"min_space"`
+ MinSpace string `toml:"min_space,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
}
type OverlayOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// Alternative program to use for the mount of the file system
- MountProgram string `toml:"mount_program"`
+ MountProgram string `toml:"mount_program,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
+	// Inodes is used to set the maximum number of inodes of the container image.
+ Inodes string `toml:"inodes,omitempty"`
// Do not create a bind mount on the storage home
- SkipMountHome string `toml:"skip_mount_home"`
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
+	// UseComposefs specifies whether composefs must be used to mount the data layers
+ UseComposefs string `toml:"use_composefs,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories
- ForceMask string `toml:"force_mask"`
+ ForceMask string `toml:"force_mask,omitempty"`
}
type VfsOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
}
type ZfsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// Name is the File System name of the ZFS File system
- Name string `toml:"fsname"`
+ Name string `toml:"fsname,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
}
// OptionsConfig represents the "storage.options" TOML config table.
@@ -120,82 +58,79 @@ type OptionsConfig struct {
// AdditionalImagesStores is the location of additional read/only
// Image stores. Usually used to access Networked File System
// for shared image content
- AdditionalImageStores []string `toml:"additionalimagestores"`
+ AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
+
+	// ImageStore is the location of the image store, kept separate from the
+	// container store. This is usually not recommended unless the user wants
+	// separate stores for images and containers.
+ ImageStore string `toml:"imagestore,omitempty"`
// AdditionalLayerStores is the location of additional read/only
// Layer stores. Usually used to access Networked File System
// for shared image content
// This API is experimental and can be changed without bumping the
// major version number.
- AdditionalLayerStores []string `toml:"additionallayerstores"`
+ AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
- // RemapUIDs is a list of default UID mappings to use for layers.
- RemapUIDs string `toml:"remap-uids"`
- // RemapGIDs is a list of default GID mappings to use for layers.
- RemapGIDs string `toml:"remap-gids"`
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+
+	// UseComposefs specifies whether composefs must be used to mount the data layers
+ UseComposefs string `toml:"use_composefs,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories.
- ForceMask os.FileMode `toml:"force_mask"`
-
- // RemapUser is the name of one or more entries in /etc/subuid which
- // should be used to set up default UID mappings.
- RemapUser string `toml:"remap-user"`
- // RemapGroup is the name of one or more entries in /etc/subgid which
- // should be used to set up default GID mappings.
- RemapGroup string `toml:"remap-group"`
+ ForceMask os.FileMode `toml:"force_mask,omitempty"`
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
// /etc/subgid which should be used to set up automatically a userns.
- RootAutoUsernsUser string `toml:"root-auto-userns-user"`
+ RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
// AutoUsernsMinSize is the minimum size for a user namespace that is
// created automatically.
- AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"`
+ AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
// AutoUsernsMaxSize is the maximum size for a user namespace that is
// created automatically.
- AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"`
+ AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
// Aufs container options to be handed to aufs drivers
- Aufs struct{ AufsOptionsConfig } `toml:"aufs"`
+ Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
// Btrfs container options to be handed to btrfs drivers
- Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"`
+ Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
- // Thinpool container options to be handed to thinpool drivers
- Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
+ // Thinpool container options to be handed to thinpool drivers (NOP)
+ Thinpool struct{} `toml:"thinpool,omitempty"`
// Overlay container options to be handed to overlay drivers
- Overlay struct{ OverlayOptionsConfig } `toml:"overlay"`
+ Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
// Vfs container options to be handed to VFS drivers
- Vfs struct{ VfsOptionsConfig } `toml:"vfs"`
+ Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
// Zfs container options to be handed to ZFS drivers
- Zfs struct{ ZfsOptionsConfig } `toml:"zfs"`
+ Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
// Do not create a bind mount on the storage home
- SkipMountHome string `toml:"skip_mount_home"`
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
// Alternative program to use for the mount of the file system
- MountProgram string `toml:"mount_program"`
+ MountProgram string `toml:"mount_program,omitempty"`
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// PullOptions specifies options to be handed to pull managers
// This API is experimental and can be changed without bumping the major version number.
- PullOptions map[string]string `toml:"pull_options"`
+ PullOptions map[string]string `toml:"pull_options,omitempty"`
// DisableVolatile doesn't allow volatile mounts when it is set.
- DisableVolatile bool `toml:"disable-volatile"`
+ DisableVolatile bool `toml:"disable-volatile,omitempty"`
}
// GetGraphDriverOptions returns the driver specific options
@@ -219,63 +154,8 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
}
- case "devicemapper":
- if options.Thinpool.AutoExtendPercent != "" {
- doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
- }
- if options.Thinpool.AutoExtendThreshold != "" {
- doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
- }
- if options.Thinpool.BaseSize != "" {
- doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
- }
- if options.Thinpool.BlockSize != "" {
- doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
- }
- if options.Thinpool.DirectLvmDevice != "" {
- doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
- }
- if options.Thinpool.DirectLvmDeviceForce != "" {
- doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
- }
- if options.Thinpool.Fs != "" {
- doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
- }
- if options.Thinpool.LogLevel != "" {
- doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
- }
- if options.Thinpool.MetadataSize != "" {
- doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
- }
- if options.Thinpool.MinFreeSpace != "" {
- doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
- }
- if options.Thinpool.MkfsArg != "" {
- doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
- }
- if options.Thinpool.MountOpt != "" {
- doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
- } else if options.MountOpt != "" {
- doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
- }
-
- if options.Thinpool.Size != "" {
- doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
- } else if options.Size != "" {
- doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
- }
-
- if options.Thinpool.UseDeferredDeletion != "" {
- doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
- }
- if options.Thinpool.UseDeferredRemoval != "" {
- doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
- }
- if options.Thinpool.XfsNoSpaceMaxRetries != "" {
- doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
- }
-
case "overlay", "overlay2":
if options.Overlay.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
} else if options.IgnoreChownErrors != "" {
@@ -296,6 +176,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.Size != "" {
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
}
+ if options.Overlay.Inodes != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes))
+ }
if options.Overlay.SkipMountHome != "" {
doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome))
} else if options.SkipMountHome != "" {
@@ -306,6 +189,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.ForceMask != 0 {
doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
}
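+		// Pass the composefs setting through to the overlay driver.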
+ if options.Overlay.UseComposefs != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs))
+ }
case "vfs":
if options.Vfs.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
deleted file mode 100644
index 6a0ac2464..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
+++ /dev/null
@@ -1,821 +0,0 @@
-// +build linux,cgo
-
-package devicemapper
-
-import (
- "errors"
- "fmt"
- "os"
- "runtime"
- "unsafe"
-
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
-)
-
-// Same as DM_DEVICE_* enum values from libdevmapper.h
-// nolint: deadcode
-const (
- deviceCreate TaskType = iota
- deviceReload
- deviceRemove
- deviceRemoveAll
- deviceSuspend
- deviceResume
- deviceInfo
- deviceDeps
- deviceRename
- deviceVersion
- deviceStatus
- deviceTable
- deviceWaitevent
- deviceList
- deviceClear
- deviceMknodes
- deviceListVersions
- deviceTargetMsg
- deviceSetGeometry
-)
-
-const (
- addNodeOnResume AddNodeType = iota
- addNodeOnCreate
-)
-
-// List of errors returned when using devicemapper.
-var (
- ErrTaskRun = errors.New("dm_task_run failed")
- ErrTaskSetName = errors.New("dm_task_set_name failed")
- ErrTaskSetMessage = errors.New("dm_task_set_message failed")
- ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
- ErrTaskSetRo = errors.New("dm_task_set_ro failed")
- ErrTaskAddTarget = errors.New("dm_task_add_target failed")
- ErrTaskSetSector = errors.New("dm_task_set_sector failed")
- ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
- ErrTaskGetInfo = errors.New("dm_task_get_info failed")
- ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
- ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
- ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
- ErrNilCookie = errors.New("cookie ptr can't be nil")
- ErrGetBlockSize = errors.New("Can't get block size")
- ErrUdevWait = errors.New("wait on udev cookie failed")
- ErrSetDevDir = errors.New("dm_set_dev_dir failed")
- ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
- ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
- ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
- ErrInvalidAddNode = errors.New("Invalid AddNode type")
- ErrBusy = errors.New("Device is Busy")
- ErrDeviceIDExists = errors.New("Device Id Exists")
- ErrEnxio = errors.New("No such device or address")
-)
-
-var (
- dmSawBusy bool
- dmSawExist bool
- dmSawEnxio bool // No Such Device or Address
-)
-
-type (
- // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl
- // command to execute.
- Task struct {
- unmanaged *cdmTask
- }
- // Deps represents dependents (layer) of a device.
- Deps struct {
- Count uint32
- Filler uint32
- Device []uint64
- }
- // Info represents information about a device.
- Info struct {
- Exists int
- Suspended int
- LiveTable int
- InactiveTable int
- OpenCount int32
- EventNr uint32
- Major uint32
- Minor uint32
- ReadOnly int
- TargetCount int32
- DeferredRemove int
- }
- // TaskType represents a type of task
- TaskType int
- // AddNodeType represents a type of node to be added
- AddNodeType int
-)
-
-// DeviceIDExists returns whether error conveys the information about device Id already
-// exist or not. This will be true if device creation or snap creation
-// operation fails if device or snap device already exists in pool.
-// Current implementation is little crude as it scans the error string
-// for exact pattern match. Replacing it with more robust implementation
-// is desirable.
-func DeviceIDExists(err error) bool {
- return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists)
-}
-
-func (t *Task) destroy() {
- if t != nil {
- DmTaskDestroy(t.unmanaged)
- runtime.SetFinalizer(t, nil)
- }
-}
-
-// TaskCreateNamed is a convenience function for TaskCreate when a name
-// will be set on the task as well
-func TaskCreateNamed(t TaskType, name string) (*Task, error) {
- task := TaskCreate(t)
- if task == nil {
- return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t))
- }
- if err := task.setName(name); err != nil {
- return nil, fmt.Errorf("devicemapper: Can't set task name %s", name)
- }
- return task, nil
-}
-
-// TaskCreate initializes a devicemapper task of tasktype
-func TaskCreate(tasktype TaskType) *Task {
- Ctask := DmTaskCreate(int(tasktype))
- if Ctask == nil {
- return nil
- }
- task := &Task{unmanaged: Ctask}
- runtime.SetFinalizer(task, (*Task).destroy)
- return task
-}
-
-func (t *Task) run() error {
- if res := DmTaskRun(t.unmanaged); res != 1 {
- return ErrTaskRun
- }
- runtime.KeepAlive(t)
- return nil
-}
-
-func (t *Task) setName(name string) error {
- if res := DmTaskSetName(t.unmanaged, name); res != 1 {
- return ErrTaskSetName
- }
- return nil
-}
-
-func (t *Task) setMessage(message string) error {
- if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
- return ErrTaskSetMessage
- }
- return nil
-}
-
-func (t *Task) setSector(sector uint64) error {
- if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
- return ErrTaskSetSector
- }
- return nil
-}
-
-func (t *Task) setCookie(cookie *uint, flags uint16) error {
- if cookie == nil {
- return ErrNilCookie
- }
- if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
- return ErrTaskSetCookie
- }
- return nil
-}
-
-func (t *Task) setAddNode(addNode AddNodeType) error {
- if addNode != addNodeOnResume && addNode != addNodeOnCreate {
- return ErrInvalidAddNode
- }
- if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
- return ErrTaskSetAddNode
- }
- return nil
-}
-
-func (t *Task) setRo() error {
- if res := DmTaskSetRo(t.unmanaged); res != 1 {
- return ErrTaskSetRo
- }
- return nil
-}
-
-func (t *Task) addTarget(start, size uint64, ttype, params string) error {
- if res := DmTaskAddTarget(t.unmanaged, start, size,
- ttype, params); res != 1 {
- return ErrTaskAddTarget
- }
- return nil
-}
-
-func (t *Task) getDeps() (*Deps, error) {
- var deps *Deps
- if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
- return nil, ErrTaskGetDeps
- }
- return deps, nil
-}
-
-func (t *Task) getInfo() (*Info, error) {
- info := &Info{}
- if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
- return nil, ErrTaskGetInfo
- }
- return info, nil
-}
-
-func (t *Task) getInfoWithDeferred() (*Info, error) {
- info := &Info{}
- if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
- return nil, ErrTaskGetInfo
- }
- return info, nil
-}
-
-func (t *Task) getDriverVersion() (string, error) {
- res := DmTaskGetDriverVersion(t.unmanaged)
- if res == "" {
- return "", ErrTaskGetDriverVersion
- }
- return res, nil
-}
-
-func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
- length uint64, targetType string, params string) {
-
- return DmGetNextTarget(t.unmanaged, next, &start, &length,
-		&targetType, &params),
- start, length, targetType, params
-}
-
-// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
-func UdevWait(cookie *uint) error {
- if res := DmUdevWait(*cookie); res != 1 {
- logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
- return ErrUdevWait
- }
- return nil
-}
-
-// SetDevDir sets the dev folder for the device mapper library (usually /dev).
-func SetDevDir(dir string) error {
- if res := DmSetDevDir(dir); res != 1 {
- logrus.Debug("devicemapper: Error dm_set_dev_dir")
- return ErrSetDevDir
- }
- return nil
-}
-
-// GetLibraryVersion returns the device mapper library version.
-func GetLibraryVersion() (string, error) {
- var version string
- if res := DmGetLibraryVersion(&version); res != 1 {
- return "", ErrGetLibraryVersion
- }
- return version, nil
-}
-
-// UdevSyncSupported returns whether device-mapper is able to sync with udev
-//
-// This is essential otherwise race conditions can arise where both udev and
-// device-mapper attempt to create and destroy devices.
-func UdevSyncSupported() bool {
- return DmUdevGetSyncSupport() != 0
-}
-
-// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
-// The return bool indicates the state of whether the sync is enabled.
-func UdevSetSyncSupport(enable bool) bool {
- if enable {
- DmUdevSetSyncSupport(1)
- } else {
- DmUdevSetSyncSupport(0)
- }
-
- return UdevSyncSupported()
-}
-
-// CookieSupported returns whether the version of device-mapper supports the
-// use of cookie's in the tasks.
-// This is largely a lower level call that other functions use.
-func CookieSupported() bool {
- return DmCookieSupported() != 0
-}
-
-// RemoveDevice is a useful helper for cleaning up a device.
-func RemoveDevice(name string) error {
- task, err := TaskCreateNamed(deviceRemove, name)
- if task == nil {
- return err
- }
-
- cookie := new(uint)
- if err := task.setCookie(cookie, 0); err != nil {
- return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
- }
- defer UdevWait(cookie)
-
- dmSawBusy = false // reset before the task is run
- dmSawEnxio = false
- if err = task.run(); err != nil {
- if dmSawBusy {
- return ErrBusy
- }
- if dmSawEnxio {
- return ErrEnxio
- }
- return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
- }
-
- return nil
-}
-
-// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
-func RemoveDeviceDeferred(name string) error {
- logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
- defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
- task, err := TaskCreateNamed(deviceRemove, name)
- if task == nil {
- return err
- }
-
- if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
- return ErrTaskDeferredRemove
- }
-
- // set a task cookie and disable library fallback, or else libdevmapper will
- // disable udev dm rules and delete the symlink under /dev/mapper by itself,
- // even if the removal is deferred by the kernel.
- cookie := new(uint)
- var flags uint16
- flags = DmUdevDisableLibraryFallback
- if err := task.setCookie(cookie, flags); err != nil {
- return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
- }
-
- // libdevmapper and udev relies on System V semaphore for synchronization,
- // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
- // So these two function call must come in pairs, otherwise semaphores will
- // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem`
- // will be reached, which will eventually make all following calls to 'task.SetCookie'
- // fail.
- // this call will not wait for the deferred removal's final executing, since no
- // udev event will be generated, and the semaphore's value will not be incremented
- // by udev, what UdevWait is just cleaning up the semaphore.
- defer UdevWait(cookie)
-
- dmSawEnxio = false
- if err = task.run(); err != nil {
- if dmSawEnxio {
- return ErrEnxio
- }
- return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
- }
-
- return nil
-}
-
-// CancelDeferredRemove cancels a deferred remove for a device.
-func CancelDeferredRemove(deviceName string) error {
- task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
- if task == nil {
- return err
- }
-
- if err := task.setSector(0); err != nil {
- return fmt.Errorf("devicemapper: Can't set sector %s", err)
- }
-
- if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
- return fmt.Errorf("devicemapper: Can't set message %s", err)
- }
-
- dmSawBusy = false
- dmSawEnxio = false
- if err := task.run(); err != nil {
- // A device might be being deleted already
- if dmSawBusy {
- return ErrBusy
- } else if dmSawEnxio {
- return ErrEnxio
- }
- return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
-
- }
- return nil
-}
-
-// GetBlockDeviceSize returns the size of a block device identified by the specified file.
-func GetBlockDeviceSize(file *os.File) (uint64, error) {
- size, err := ioctlBlkGetSize64(file.Fd())
- if err != nil {
- logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
- return 0, ErrGetBlockSize
- }
- return uint64(size), nil
-}
-
-// BlockDeviceDiscard runs discard for the given path.
-// This is used as a workaround for the kernel not discarding block so
-// on the thin pool when we remove a thinp device, so we do it
-// manually
-func BlockDeviceDiscard(path string) error {
- file, err := os.OpenFile(path, os.O_RDWR, 0)
- if err != nil {
- return err
- }
- defer file.Close()
-
- size, err := GetBlockDeviceSize(file)
- if err != nil {
- return err
- }
-
- if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
- return err
- }
-
- // Without this sometimes the remove of the device that happens after
- // discard fails with EBUSY.
- unix.Sync()
-
- return nil
-}
-
-// CreatePool is the programmatic example of "dmsetup create".
-// It creates a device with the specified poolName, data and metadata file and block size.
-func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
- task, err := TaskCreateNamed(deviceCreate, poolName)
- if task == nil {
- return err
- }
-
- size, err := GetBlockDeviceSize(dataFile)
- if err != nil {
- return fmt.Errorf("devicemapper: Can't get data size %s", err)
- }
-
- params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
- if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
- return fmt.Errorf("devicemapper: Can't add target %s", err)
- }
-
- cookie := new(uint)
- var flags uint16
- flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
- if err := task.setCookie(cookie, flags); err != nil {
- return fmt.Errorf("devicemapper: Can't set cookie %s", err)
- }
- defer UdevWait(cookie)
-
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
- }
-
- return nil
-}
-
-// ReloadPool is the programmatic example of "dmsetup reload".
-// It reloads the table with the specified poolName, data and metadata file and block size.
-func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
- task, err := TaskCreateNamed(deviceReload, poolName)
- if task == nil {
- return err
- }
-
- size, err := GetBlockDeviceSize(dataFile)
- if err != nil {
- return fmt.Errorf("devicemapper: Can't get data size %s", err)
- }
-
- params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
- if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
- return fmt.Errorf("devicemapper: Can't add target %s", err)
- }
-
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running ReloadPool %s", err)
- }
-
- return nil
-}
-
-// GetDeps is the programmatic example of "dmsetup deps".
-// It outputs a list of devices referenced by the live table for the specified device.
-func GetDeps(name string) (*Deps, error) {
- task, err := TaskCreateNamed(deviceDeps, name)
- if task == nil {
- return nil, err
- }
- if err := task.run(); err != nil {
- return nil, err
- }
- return task.getDeps()
-}
-
-// GetInfo is the programmatic example of "dmsetup info".
-// It outputs some brief information about the device.
-func GetInfo(name string) (*Info, error) {
- task, err := TaskCreateNamed(deviceInfo, name)
- if task == nil {
- return nil, err
- }
- if err := task.run(); err != nil {
- return nil, err
- }
- return task.getInfo()
-}
-
-// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred.
-// It outputs some brief information about the device.
-func GetInfoWithDeferred(name string) (*Info, error) {
- task, err := TaskCreateNamed(deviceInfo, name)
- if task == nil {
- return nil, err
- }
- if err := task.run(); err != nil {
- return nil, err
- }
- return task.getInfoWithDeferred()
-}
-
-// GetDriverVersion is the programmatic example of "dmsetup version".
-// It outputs version information of the driver.
-func GetDriverVersion() (string, error) {
- task := TaskCreate(deviceVersion)
- if task == nil {
- return "", fmt.Errorf("devicemapper: Can't create deviceVersion task")
- }
- if err := task.run(); err != nil {
- return "", err
- }
- return task.getDriverVersion()
-}
-
-// GetStatus is the programmatic example of "dmsetup status".
-// It outputs status information for the specified device name.
-func GetStatus(name string) (uint64, uint64, string, string, error) {
- task, err := TaskCreateNamed(deviceStatus, name)
- if task == nil {
- logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err)
- return 0, 0, "", "", err
- }
- if err := task.run(); err != nil {
- logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err)
- return 0, 0, "", "", err
- }
-
- devinfo, err := task.getInfo()
- if err != nil {
- logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err)
- return 0, 0, "", "", err
- }
- if devinfo.Exists == 0 {
- logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name)
- return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
- }
-
- _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
- return start, length, targetType, params, nil
-}
-
-// GetTable is the programmatic example for "dmsetup table".
-// It outputs the current table for the specified device name.
-func GetTable(name string) (uint64, uint64, string, string, error) {
- task, err := TaskCreateNamed(deviceTable, name)
- if task == nil {
- logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err)
- return 0, 0, "", "", err
- }
- if err := task.run(); err != nil {
- logrus.Debugf("devicemapper: GetTable() Error Run: %s", err)
- return 0, 0, "", "", err
- }
-
- devinfo, err := task.getInfo()
- if err != nil {
- logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err)
- return 0, 0, "", "", err
- }
- if devinfo.Exists == 0 {
- logrus.Debugf("devicemapper: GetTable() Non existing device %s", name)
- return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
- }
-
- _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
- return start, length, targetType, params, nil
-}
-
-// SetTransactionID sets a transaction id for the specified device name.
-func SetTransactionID(poolName string, oldID uint64, newID uint64) error {
- task, err := TaskCreateNamed(deviceTargetMsg, poolName)
- if task == nil {
- return err
- }
-
- if err := task.setSector(0); err != nil {
- return fmt.Errorf("devicemapper: Can't set sector %s", err)
- }
-
- if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil {
- return fmt.Errorf("devicemapper: Can't set message %s", err)
- }
-
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err)
- }
- return nil
-}
-
-// SuspendDevice is the programmatic example of "dmsetup suspend".
-// It suspends the specified device.
-func SuspendDevice(name string) error {
- task, err := TaskCreateNamed(deviceSuspend, name)
- if task == nil {
- return err
- }
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err)
- }
- return nil
-}
-
-// ResumeDevice is the programmatic example of "dmsetup resume".
-// It un-suspends the specified device.
-func ResumeDevice(name string) error {
- task, err := TaskCreateNamed(deviceResume, name)
- if task == nil {
- return err
- }
-
- cookie := new(uint)
- if err := task.setCookie(cookie, 0); err != nil {
- return fmt.Errorf("devicemapper: Can't set cookie %s", err)
- }
- defer UdevWait(cookie)
-
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
- }
-
- return nil
-}
-
-// CreateDevice creates a device with the specified poolName with the specified device id.
-func CreateDevice(poolName string, deviceID int) error {
- logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID)
- task, err := TaskCreateNamed(deviceTargetMsg, poolName)
- if task == nil {
- return err
- }
-
- if err := task.setSector(0); err != nil {
- return fmt.Errorf("devicemapper: Can't set sector %s", err)
- }
-
- if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil {
- return fmt.Errorf("devicemapper: Can't set message %s", err)
- }
-
- dmSawExist = false // reset before the task is run
- if err := task.run(); err != nil {
- // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
- if dmSawExist {
- return ErrDeviceIDExists
- }
-
- return fmt.Errorf("devicemapper: Error running CreateDevice %s", err)
-
- }
- return nil
-}
-
-// DeleteDevice deletes a device with the specified poolName with the specified device id.
-func DeleteDevice(poolName string, deviceID int) error {
- task, err := TaskCreateNamed(deviceTargetMsg, poolName)
- if task == nil {
- return err
- }
-
- if err := task.setSector(0); err != nil {
- return fmt.Errorf("devicemapper: Can't set sector %s", err)
- }
-
- if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil {
- return fmt.Errorf("devicemapper: Can't set message %s", err)
- }
-
- dmSawBusy = false
- if err := task.run(); err != nil {
- if dmSawBusy {
- return ErrBusy
- }
- return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err)
- }
- return nil
-}
-
-// ActivateDevice activates the device identified by the specified
-// poolName, name and deviceID with the specified size.
-func ActivateDevice(poolName string, name string, deviceID int, size uint64) error {
- return activateDevice(poolName, name, deviceID, size, "")
-}
-
-// ActivateDeviceWithExternal activates the device identified by the specified
-// poolName, name and deviceID with the specified size.
-func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error {
- return activateDevice(poolName, name, deviceID, size, external)
-}
-
-func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error {
- task, err := TaskCreateNamed(deviceCreate, name)
- if task == nil {
- return err
- }
-
- var params string
- if len(external) > 0 {
- params = fmt.Sprintf("%s %d %s", poolName, deviceID, external)
- } else {
- params = fmt.Sprintf("%s %d", poolName, deviceID)
- }
- if err := task.addTarget(0, size/512, "thin", params); err != nil {
- return fmt.Errorf("devicemapper: Can't add target %s", err)
- }
- if err := task.setAddNode(addNodeOnCreate); err != nil {
- return fmt.Errorf("devicemapper: Can't add node %s", err)
- }
-
- cookie := new(uint)
- if err := task.setCookie(cookie, 0); err != nil {
- return fmt.Errorf("devicemapper: Can't set cookie %s", err)
- }
-
- defer UdevWait(cookie)
-
- if err := task.run(); err != nil {
- return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
- }
-
- return nil
-}
-
-// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
-func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
- task, err := TaskCreateNamed(deviceTargetMsg, poolName)
- if task == nil {
- return err
- }
-
- if err := task.setSector(0); err != nil {
- return fmt.Errorf("devicemapper: Can't set sector %s", err)
- }
-
- if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
- return fmt.Errorf("devicemapper: Can't set message %s", err)
- }
-
- dmSawExist = false // reset before the task is run
- if err := task.run(); err != nil {
- // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
- if dmSawExist {
- return ErrDeviceIDExists
- }
- return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
- }
-
- return nil
-}
-
-// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId,
-func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
- devinfo, _ := GetInfo(baseName)
- doSuspend := devinfo != nil && devinfo.Exists != 0
-
- if doSuspend {
- if err := SuspendDevice(baseName); err != nil {
- return err
- }
- }
-
- if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
- if doSuspend {
- if err2 := ResumeDevice(baseName); err2 != nil {
- return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
- }
- }
- return err
- }
-
- if doSuspend {
- if err := ResumeDevice(baseName); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
deleted file mode 100644
index 082fb1ba3..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// +build linux,cgo
-
-package devicemapper
-
-import "C"
-
-import (
- "fmt"
- "strings"
-
- "github.com/sirupsen/logrus"
-)
-
-// DevmapperLogger defines methods required to register as a callback for
-// logging events received from devicemapper. Note that devicemapper will send
-// *all* logs regardless to callbacks (including debug logs) so it's
-// recommended to not spam the console with the outputs.
-type DevmapperLogger interface {
- // DMLog is the logging callback containing all of the information from
- // devicemapper. The interface is identical to the C libdm counterpart.
- DMLog(level int, file string, line int, dmError int, message string)
-}
-
-// dmLogger is the current logger in use that is being forwarded our messages.
-var dmLogger DevmapperLogger
-
-// LogInit changes the logging callback called after processing libdm logs for
-// error message information. The default logger simply forwards all logs to
-// logrus. Calling LogInit(nil) disables the calling of callbacks.
-func LogInit(logger DevmapperLogger) {
- dmLogger = logger
-}
-
-// Due to the way cgo works this has to be in a separate file, as devmapper.go has
-// definitions in the cgo block, which is incompatible with using "//export"
-
-// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
-// because we are using callbacks, this function will be called for *every* log
-// in libdm (even debug ones because there's no way of setting the verbosity
-// level for an external logging callback).
-//export StorageDevmapperLogCallback
-func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
- msg := C.GoString(message)
-
- // Track what errno libdm saw, because the library only gives us 0 or 1.
- if level < LogLevelDebug {
- if strings.Contains(msg, "busy") {
- dmSawBusy = true
- }
-
- if strings.Contains(msg, "File exists") {
- dmSawExist = true
- }
-
- if strings.Contains(msg, "No such device or address") {
- dmSawEnxio = true
- }
- }
-
- if dmLogger != nil {
- dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
- }
-}
-
-// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
-// all logs that are of higher or equal priority to the given level to the
-// corresponding logrus level.
-type DefaultLogger struct {
- // Level corresponds to the highest libdm level that will be forwarded to
- // logrus. In order to change this, register a new DefaultLogger.
- Level int
-}
-
-// DMLog is the logging callback containing all of the information from
-// devicemapper. The interface is identical to the C libdm counterpart.
-func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
- if level <= l.Level {
- // Forward the log to the correct logrus level, if allowed by dmLogLevel.
- logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
- switch level {
- case LogLevelFatal, LogLevelErr:
- logrus.Error(logMsg)
- case LogLevelWarn:
- logrus.Warn(logMsg)
- case LogLevelNotice, LogLevelInfo:
- logrus.Info(logMsg)
- case LogLevelDebug:
- logrus.Debug(logMsg)
- default:
- // Don't drop any "unknown" levels.
- logrus.Info(logMsg)
- }
- }
-}
-
-// registerLogCallback registers our own logging callback function for libdm
-// (which is StorageDevmapperLogCallback).
-//
-// Because libdm only gives us {0,1} error codes we need to parse the logs
-// produced by libdm (to set dmSawBusy and so on). Note that by registering a
-// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
-// stderr so we have to log everything ourselves. None of this handling is
-// optional because we depend on log callbacks to parse the logs, and if we
-// don't forward the log information we'll be in a lot of trouble when
-// debugging things.
-func registerLogCallback() {
- LogWithErrnoInit()
-}
-
-func init() {
- // Use the default logger by default. We only allow LogLevelFatal by
- // default, because internally we mask a lot of libdm errors by retrying
- // and similar tricks. Also, libdm is very chatty and we don't want to
- // worry users for no reason.
- dmLogger = DefaultLogger{
- Level: LogLevelFatal,
- }
-
- // Register as early as possible so we don't miss anything.
- registerLogCallback()
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
deleted file mode 100644
index 190d83d49..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// +build linux,cgo
-
-package devicemapper
-
-/*
-#define _GNU_SOURCE
-#include <libdevmapper.h>
-#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
-
-// FIXME: Can't we find a way to do the logging in pure Go?
-extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
-
-static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
-{
- char *buffer = NULL;
- va_list ap;
- int ret;
-
- va_start(ap, f);
- ret = vasprintf(&buffer, f, ap);
- va_end(ap);
- if (ret < 0) {
- // memory allocation failed -- should never happen?
- return;
- }
-
- StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
- free(buffer);
-}
-
-static void log_with_errno_init()
-{
- dm_log_with_errno_init(log_cb);
-}
-*/
-import "C"
-
-import (
- "reflect"
- "unsafe"
-)
-
-type (
- cdmTask C.struct_dm_task
-)
-
-// IOCTL consts
-const (
- BlkGetSize64 = C.BLKGETSIZE64
- BlkDiscard = C.BLKDISCARD
-)
-
-// Devicemapper cookie flags.
-const (
- DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
- DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
- DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
- DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK
-)
-
-// DeviceMapper mapped functions.
-var (
- DmGetLibraryVersion = dmGetLibraryVersionFct
- DmGetNextTarget = dmGetNextTargetFct
- DmSetDevDir = dmSetDevDirFct
- DmTaskAddTarget = dmTaskAddTargetFct
- DmTaskCreate = dmTaskCreateFct
- DmTaskDestroy = dmTaskDestroyFct
- DmTaskGetDeps = dmTaskGetDepsFct
- DmTaskGetInfo = dmTaskGetInfoFct
- DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
- DmTaskRun = dmTaskRunFct
- DmTaskSetAddNode = dmTaskSetAddNodeFct
- DmTaskSetCookie = dmTaskSetCookieFct
- DmTaskSetMessage = dmTaskSetMessageFct
- DmTaskSetName = dmTaskSetNameFct
- DmTaskSetRo = dmTaskSetRoFct
- DmTaskSetSector = dmTaskSetSectorFct
- DmUdevWait = dmUdevWaitFct
- DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
- DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
- DmCookieSupported = dmCookieSupportedFct
- LogWithErrnoInit = logWithErrnoInitFct
- DmTaskDeferredRemove = dmTaskDeferredRemoveFct
- DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
-)
-
-func free(p *C.char) {
- C.free(unsafe.Pointer(p))
-}
-
-func dmTaskDestroyFct(task *cdmTask) {
- C.dm_task_destroy((*C.struct_dm_task)(task))
-}
-
-func dmTaskCreateFct(taskType int) *cdmTask {
- return (*cdmTask)(C.dm_task_create(C.int(taskType)))
-}
-
-func dmTaskRunFct(task *cdmTask) int {
- ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
- return int(ret)
-}
-
-func dmTaskSetNameFct(task *cdmTask, name string) int {
- Cname := C.CString(name)
- defer free(Cname)
-
- return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
-}
-
-func dmTaskSetMessageFct(task *cdmTask, message string) int {
- Cmessage := C.CString(message)
- defer free(Cmessage)
-
- return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
-}
-
-func dmTaskSetSectorFct(task *cdmTask, sector uint64) int {
- return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
-}
-
-func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int {
- cCookie := C.uint32_t(*cookie)
- defer func() {
- *cookie = uint(cCookie)
- }()
- return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
-}
-
-func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int {
- return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
-}
-
-func dmTaskSetRoFct(task *cdmTask) int {
- return int(C.dm_task_set_ro((*C.struct_dm_task)(task)))
-}
-
-func dmTaskAddTargetFct(task *cdmTask,
- start, size uint64, ttype, params string) int {
-
- Cttype := C.CString(ttype)
- defer free(Cttype)
-
- Cparams := C.CString(params)
- defer free(Cparams)
-
- return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
-}
-
-func dmTaskGetDepsFct(task *cdmTask) *Deps {
- Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
- if Cdeps == nil {
- return nil
- }
-
- // golang issue: https://github.com/golang/go/issues/11925
- hdr := reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
- Len: int(Cdeps.count),
- Cap: int(Cdeps.count),
- }
- devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
-
- deps := &Deps{
- Count: uint32(Cdeps.count),
- Filler: uint32(Cdeps.filler),
- }
- for _, device := range devices {
- deps.Device = append(deps.Device, uint64(device))
- }
- return deps
-}
-
-func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
- Cinfo := C.struct_dm_info{}
- defer func() {
- info.Exists = int(Cinfo.exists)
- info.Suspended = int(Cinfo.suspended)
- info.LiveTable = int(Cinfo.live_table)
- info.InactiveTable = int(Cinfo.inactive_table)
- info.OpenCount = int32(Cinfo.open_count)
- info.EventNr = uint32(Cinfo.event_nr)
- info.Major = uint32(Cinfo.major)
- info.Minor = uint32(Cinfo.minor)
- info.ReadOnly = int(Cinfo.read_only)
- info.TargetCount = int32(Cinfo.target_count)
- }()
- return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
-}
-
-func dmTaskGetDriverVersionFct(task *cdmTask) string {
- buffer := C.malloc(128)
- defer C.free(buffer)
- res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
- if res == 0 {
- return ""
- }
- return C.GoString((*C.char)(buffer))
-}
-
-func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
- var (
- Cstart, Clength C.uint64_t
- CtargetType, Cparams *C.char
- )
- defer func() {
- *start = uint64(Cstart)
- *length = uint64(Clength)
- *target = C.GoString(CtargetType)
- *params = C.GoString(Cparams)
- }()
-
- nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
- return nextp
-}
-
-func dmUdevSetSyncSupportFct(syncWithUdev int) {
- (C.dm_udev_set_sync_support(C.int(syncWithUdev)))
-}
-
-func dmUdevGetSyncSupportFct() int {
- return int(C.dm_udev_get_sync_support())
-}
-
-func dmUdevWaitFct(cookie uint) int {
- return int(C.dm_udev_wait(C.uint32_t(cookie)))
-}
-
-func dmCookieSupportedFct() int {
- return int(C.dm_cookie_supported())
-}
-
-func logWithErrnoInitFct() {
- C.log_with_errno_init()
-}
-
-func dmSetDevDirFct(dir string) int {
- Cdir := C.CString(dir)
- defer free(Cdir)
-
- return int(C.dm_set_dev_dir(Cdir))
-}
-
-func dmGetLibraryVersionFct(version *string) int {
- buffer := C.CString(string(make([]byte, 128)))
- defer free(buffer)
- defer func() {
- *version = C.GoString(buffer)
- }()
- return int(C.dm_get_library_version(buffer, 128))
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
deleted file mode 100644
index 7f793c270..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build linux,cgo,!libdm_no_deferred_remove
-
-package devicemapper
-
-// #include <libdevmapper.h>
-import "C"
-
-// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
-const LibraryDeferredRemovalSupport = true
-
-func dmTaskDeferredRemoveFct(task *cdmTask) int {
- return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
-}
-
-func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
- Cinfo := C.struct_dm_info{}
- defer func() {
- info.Exists = int(Cinfo.exists)
- info.Suspended = int(Cinfo.suspended)
- info.LiveTable = int(Cinfo.live_table)
- info.InactiveTable = int(Cinfo.inactive_table)
- info.OpenCount = int32(Cinfo.open_count)
- info.EventNr = uint32(Cinfo.event_nr)
- info.Major = uint32(Cinfo.major)
- info.Minor = uint32(Cinfo.minor)
- info.ReadOnly = int(Cinfo.read_only)
- info.TargetCount = int32(Cinfo.target_count)
- info.DeferredRemove = int(Cinfo.deferred_remove)
- }()
- return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
deleted file mode 100644
index 7d8450898..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build linux,cgo,!static_build
-
-package devicemapper
-
-// #cgo pkg-config: devmapper
-import "C"
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
deleted file mode 100644
index a880fec8c..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build linux,cgo,libdm_no_deferred_remove
-
-package devicemapper
-
-// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
-const LibraryDeferredRemovalSupport = false
-
-func dmTaskDeferredRemoveFct(task *cdmTask) int {
- // Error. Nobody should be calling it.
- return -1
-}
-
-func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
- return -1
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
deleted file mode 100644
index cf7f26a4c..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build linux,cgo,static_build
-
-package devicemapper
-
-// #cgo pkg-config: --static devmapper
-import "C"
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
deleted file mode 100644
index 50ea7c482..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build linux,cgo
-
-package devicemapper
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-func ioctlBlkGetSize64(fd uintptr) (int64, error) {
- var size int64
- if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
- return 0, err
- }
- return size, nil
-}
-
-func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
- var r [2]uint64
- r[0] = offset
- r[1] = length
-
- if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go
deleted file mode 100644
index cee5e5454..000000000
--- a/vendor/github.com/containers/storage/pkg/devicemapper/log.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package devicemapper
-
-// definitions from lvm2 lib/log/log.h
-const (
- LogLevelFatal = 2 + iota // _LOG_FATAL
- LogLevelErr // _LOG_ERR
- LogLevelWarn // _LOG_WARN
- LogLevelNotice // _LOG_NOTICE
- LogLevelInfo // _LOG_INFO
- LogLevelDebug // _LOG_DEBUG
-)
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go
index b0ce706e5..829fe59f3 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory.go
@@ -1,7 +1,6 @@
package directory
import (
- "io/ioutil"
"os"
"path/filepath"
)
@@ -15,7 +14,7 @@ type DiskUsage struct {
// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
func MoveToSubdir(oldpath, subdir string) error {
- infos, err := ioutil.ReadDir(oldpath)
+ infos, err := os.ReadDir(oldpath)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
index 8d58d24ca..08060f2a2 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -1,9 +1,10 @@
-// +build linux darwin freebsd solaris
+//go:build !windows
package directory
import (
- "os"
+ "errors"
+ "io/fs"
"path/filepath"
"syscall"
)
@@ -21,18 +22,22 @@ func Size(dir string) (size int64, err error) {
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
data := make(map[uint64]struct{})
- err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Usage() returns the error.
// if dir/x disappeared while walking, Usage() ignores dir/x.
- if os.IsNotExist(err) && d != dir {
+ if errors.Is(err, fs.ErrNotExist) && d != dir {
return nil
}
return err
}
- if fileInfo == nil {
- return nil
+ fileInfo, err := entry.Info()
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil
+ }
+ return err
}
// Check inode to only count the sizes of files with multiple hard links once.
@@ -44,9 +49,8 @@ func Usage(dir string) (usage *DiskUsage, err error) {
// inode is not a uint64 on all platforms. Cast it to avoid issues.
data[uint64(inode)] = struct{}{}
-
// Ignore directory sizes
- if fileInfo.IsDir() {
+ if entry.IsDir() {
return nil
}
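
The hunk above moves `directory.Usage` from `filepath.Walk` to `filepath.WalkDir`, which defers the per-entry stat until `entry.Info()` is called; because that stat is lazy, a file deleted mid-walk surfaces as `fs.ErrNotExist` from `Info()` and must be tolerated. A minimal standalone sketch of the same pattern (illustrative only, not part of the vendored code):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"path/filepath"
)

// totalSize walks dir with WalkDir, ignoring entries that vanish mid-walk,
// mirroring the error handling used in the vendored Usage().
func totalSize(dir string) (int64, error) {
	var size int64
	err := filepath.WalkDir(dir, func(path string, entry fs.DirEntry, err error) error {
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) && path != dir {
				return nil // dir/x disappeared while walking; skip it
			}
			return err
		}
		if entry.IsDir() {
			return nil // ignore directory sizes
		}
		info, err := entry.Info() // lazy stat; can also race with deletion
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				return nil
			}
			return err
		}
		size += info.Size()
		return nil
	})
	return size, err
}

func main() {
	n, err := totalSize(".")
	fmt.Println(n, err)
}
```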
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
index a7a81240b..c2145c26f 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -1,9 +1,10 @@
-// +build windows
+//go:build windows
package directory
import (
- "os"
+ "errors"
+ "io/fs"
"path/filepath"
)
@@ -19,11 +20,11 @@ func Size(dir string) (size int64, err error) {
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
- err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Size() returns the error.
// if dir/x disappeared while walking, Size() ignores dir/x.
- if os.IsNotExist(err) && d != dir {
+ if errors.Is(err, fs.ErrNotExist) && path != dir {
return nil
}
return err
@@ -32,16 +33,18 @@ func Usage(dir string) (usage *DiskUsage, err error) {
usage.InodeCount++
// Ignore directory sizes
- if fileInfo == nil {
+ if d.IsDir() {
return nil
}
- s := fileInfo.Size()
- if fileInfo.IsDir() || s == 0 {
- return nil
+ fileInfo, err := d.Info()
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil
+ }
+ return err
}
-
- usage.Size += s
+ usage.Size += fileInfo.Size()
return nil
})
diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
deleted file mode 100644
index 7df7f3d43..000000000
--- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build linux
-
-package dmesg
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Dmesg returns last messages from the kernel log, up to size bytes
-func Dmesg(size int) []byte {
- t := uintptr(3) // SYSLOG_ACTION_READ_ALL
- b := make([]byte, size)
- amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
- if err != 0 {
- return []byte{}
- }
- return b[:amt]
-}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go
new file mode 100644
index 000000000..eeecc9f75
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go
@@ -0,0 +1,38 @@
+package fileutils
+
+import (
+ "errors"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// Exists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink is followed.
+func Exists(path string) error {
+	// It uses unix.Faccessat, which is faster than os.Stat for
+	// simply checking that a file exists.
+ err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
+ if err != nil {
+ return &os.PathError{Op: "faccessat", Path: path, Err: err}
+ }
+ return nil
+}
+
+// Lexists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink itself is checked.
+func Lexists(path string) error {
+ // FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for
+ // faccessat. In this case, the call to faccessat will return EINVAL and
+ // we fall back to using Lstat.
+ err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ if errors.Is(err, syscall.EINVAL) {
+ _, err = os.Lstat(path)
+ return err
+ }
+ return &os.PathError{Op: "faccessat", Path: path, Err: err}
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
new file mode 100644
index 000000000..785b13317
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
@@ -0,0 +1,33 @@
+//go:build !windows && !freebsd
+
+package fileutils
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// Exists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink is followed.
+func Exists(path string) error {
+	// It uses unix.Faccessat, which is faster than os.Stat for
+	// simply checking that a file exists.
+ err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
+ if err != nil {
+ return &os.PathError{Op: "faccessat", Path: path, Err: err}
+ }
+ return nil
+}
+
+// Lexists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink itself is checked.
+func Lexists(path string) error {
+	// It uses unix.Faccessat, which is faster than os.Stat for
+	// simply checking that a file exists.
+ err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ return &os.PathError{Op: "faccessat", Path: path, Err: err}
+ }
+ return nil
+}
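
A short usage sketch for the new `Exists`/`Lexists` helpers (illustrative, not part of the diff); on Unix both are built on `faccessat(2)` with `F_OK`, which is cheaper than a full `stat`, and the returned `*os.PathError` still matches `fs.ErrNotExist`:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Exists follows symlinks; Lexists checks the link itself.
	if err := fileutils.Exists("/etc/passwd"); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			fmt.Println("missing")
		} else {
			fmt.Println("cannot check:", err)
		}
		return
	}
	fmt.Println("present")
}
```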
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go
new file mode 100644
index 000000000..355cf0464
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go
@@ -0,0 +1,18 @@
+package fileutils
+
+import (
+ "os"
+)
+
+// Exists checks whether a file or directory exists at the given path.
+func Exists(path string) error {
+ _, err := os.Stat(path)
+ return err
+}
+
+// Lexists checks whether a file or directory exists at the given path, without
+// resolving symlinks
+func Lexists(path string) error {
+ _, err := os.Lstat(path)
+ return err
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index b3998fb35..85ce2d526 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -1,6 +1,7 @@
package fileutils
import (
+ "errors"
"fmt"
"io"
"os"
@@ -9,7 +10,6 @@ import (
"strings"
"text/scanner"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +38,7 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
return nil, errors.New("illegal exclusion pattern: \"!\"")
}
newp.exclusion = true
- p = p[1:]
+ p = strings.TrimPrefix(filepath.Clean(p[1:]), "/")
pm.exclusions = true
}
// Do some syntax checking on the pattern.
@@ -165,7 +165,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern {
return pm.patterns
}
-// Pattern defines a single regexp used used to filter file paths.
+// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
cleanedPattern string
dirs []string
@@ -183,7 +183,6 @@ func (p *Pattern) Exclusion() bool {
}
func (p *Pattern) match(path string) (bool, error) {
-
if p.regexp == nil {
if err := p.compile(); err != nil {
return false, filepath.ErrBadPattern
@@ -321,14 +320,14 @@ func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
- return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
- return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
@@ -340,28 +339,28 @@ func ReadSymlinkedDirectory(path string) (string, error) {
// The target of the symbolic link can be a file and a directory.
func ReadSymlinkedPath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
- return "", errors.Wrapf(err, "unable to get absolute path for %q", path)
+ return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to canonicalise path for %q", path)
+ return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
}
- if _, err := os.Stat(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path)
+ if err := Exists(realPath); err != nil {
+ return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err)
}
return realPath, nil
}
// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
- if _, err := os.Stat(path); err != nil {
+ if err := Exists(path); err != nil {
if os.IsNotExist(err) {
if isDir {
- return os.MkdirAll(path, 0755)
+ return os.MkdirAll(path, 0o755)
}
- if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err
}
- f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ f, err := os.OpenFile(path, os.O_CREATE, 0o755)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
index 9e0e97bd6..3cb250c5a 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -1,10 +1,9 @@
-// +build linux freebsd
+//go:build linux || freebsd
package fileutils
import (
"fmt"
- "io/ioutil"
"os"
"github.com/sirupsen/logrus"
@@ -13,8 +12,8 @@ import (
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
- if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
- logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("%v", err)
} else {
return len(fds)
}
diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
index e6094b55b..6b9a853b7 100644
--- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
+++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
@@ -1,10 +1,9 @@
-// +build linux
+//go:build linux
package fsutils
import (
"fmt"
- "io/ioutil"
"os"
"unsafe"
@@ -12,14 +11,14 @@ import (
)
func locateDummyIfEmpty(path string) (string, error) {
- children, err := ioutil.ReadDir(path)
+ children, err := os.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
- dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+ dummyFile, err := os.CreateTemp(path, "fsutils-dummy")
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go
new file mode 100644
index 000000000..5b21c4b76
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go
@@ -0,0 +1,45 @@
+package fsverity
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// verityDigest struct represents the digest used for verifying the integrity of a file.
+type verityDigest struct {
+ Fsv unix.FsverityDigest
+ Buf [64]byte
+}
+
+// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
+// in read-only mode.
+// The 'description' parameter is a human-readable description of the file.
+func EnableVerity(description string, fd int) error {
+ enableArg := unix.FsverityEnableArg{
+ Version: 1,
+ Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
+ Block_size: 4096,
+ }
+
+ _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
+ if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
+ return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
+ }
+ return nil
+}
+
+// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
+// The 'description' parameter is a human-readable description of the file.
+func MeasureVerity(description string, fd int) (string, error) {
+ var digest verityDigest
+ digest.Fsv.Size = 64
+ _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
+ if e1 != 0 {
+ return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
+ }
+ return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
+}
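
For context, a hedged usage sketch of the new fsverity helpers; the file path is hypothetical, and the calls only succeed on a filesystem with fs-verity support enabled (e.g. ext4 or btrfs with the verity feature), on a descriptor opened read-only:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/fsverity"
)

func main() {
	// EnableVerity requires a read-only descriptor; enabling twice is
	// tolerated because the wrapper treats EEXIST as success.
	f, err := os.Open("/tmp/layer.tar") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := fsverity.EnableVerity("layer.tar", int(f.Fd())); err != nil {
		fmt.Println("enable:", err)
		return
	}
	digest, err := fsverity.MeasureVerity("layer.tar", int(f.Fd()))
	if err != nil {
		fmt.Println("measure:", err)
		return
	}
	fmt.Println("sha256 verity digest:", digest)
}
```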
diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
new file mode 100644
index 000000000..80b9171db
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
@@ -0,0 +1,20 @@
+//go:build !linux
+
+package fsverity
+
+import (
+ "fmt"
+)
+
+// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
+// in read-only mode.
+// The 'description' parameter is a human-readable description of the file.
+func EnableVerity(description string, fd int) error {
+ return fmt.Errorf("fs-verity is not supported on this platform")
+}
+
+// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
+// The 'description' parameter is a human-readable description of the file.
+func MeasureVerity(description string, fd int) (string, error) {
+ return "", fmt.Errorf("fs-verity is not supported on this platform")
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go
new file mode 100644
index 000000000..7eb63b67a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go
@@ -0,0 +1,37 @@
+package homedir
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+)
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetCacheHome returns XDG_CACHE_HOME.
+// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetCacheHome() (string, error) {
+ if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
+ return xdgCacheHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
+ }
+ return filepath.Join(home, ".cache"), nil
+}
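
A brief usage sketch of the relocated XDG helpers (illustrative only); they fall back to `$HOME/.local/share` and `$HOME/.cache` when the XDG variables are unset, and only fail when HOME cannot be determined either:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	data, err := homedir.GetDataHome()
	if err != nil {
		panic(err)
	}
	cache, _ := homedir.GetCacheHome()
	fmt.Println("data:", data, "cache:", cache)
}
```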
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
deleted file mode 100644
index 06b53854b..000000000
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !linux,!darwin,!freebsd
-
-package homedir
-
-// Copyright 2013-2018 Docker, Inc.
-// NOTE: this package has originally been copied from github.com/docker/docker.
-
-import (
- "errors"
-)
-
-// GetRuntimeDir is unsupported on non-linux system.
-func GetRuntimeDir() (string, error) {
- return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
-}
-
-// StickRuntimeDirContents is unsupported on non-linux system.
-func StickRuntimeDirContents(files []string) ([]string, error) {
- return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
-}
-
-// GetDataHome is unsupported on non-linux system.
-func GetDataHome() (string, error) {
- return "", errors.New("homedir.GetDataHome() is not supported on this system")
-}
-
-// GetConfigHome is unsupported on non-linux system.
-func GetConfigHome() (string, error) {
- return "", errors.New("homedir.GetConfigHome() is not supported on this system")
-}
-
-// GetCacheHome is unsupported on non-linux system.
-func GetCacheHome() (string, error) {
- return "", errors.New("homedir.GetCacheHome() is not supported on this system")
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index 2475e351b..f351b48bb 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package homedir
@@ -6,12 +6,16 @@ package homedir
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
- "errors"
+ "fmt"
"os"
"path/filepath"
+ "strconv"
"strings"
+ "sync"
+ "syscall"
"github.com/containers/storage/pkg/unshare"
+ "github.com/sirupsen/logrus"
)
// Key returns the env var name for the user's home dir based on
@@ -39,18 +43,6 @@ func GetShortcutString() string {
return "~"
}
-// GetRuntimeDir returns XDG_RUNTIME_DIR.
-// XDG_RUNTIME_DIR is typically configured via pam_systemd.
-// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetRuntimeDir() (string, error) {
- if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
- return xdgRuntimeDir, nil
- }
- return "", errors.New("could not get XDG_RUNTIME_DIR")
-}
-
// StickRuntimeDirContents sets the sticky bit on files that are under
// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
//
@@ -62,7 +54,7 @@ func StickRuntimeDirContents(files []string) ([]string, error) {
runtimeDir, err := GetRuntimeDir()
if err != nil {
// ignore error if runtimeDir is empty
- return nil, nil
+ return nil, nil //nolint: nilerr
}
runtimeDir, err = filepath.Abs(runtimeDir)
if err != nil {
@@ -94,19 +86,18 @@ func stick(f string) error {
return os.Chmod(f, m)
}
-// GetDataHome returns XDG_DATA_HOME.
-// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetDataHome() (string, error) {
- if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
- return xdgDataHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_DATA_HOME or HOME")
- }
- return filepath.Join(home, ".local", "share"), nil
+var (
+ rootlessConfigHomeDirError error
+ rootlessConfigHomeDirOnce sync.Once
+ rootlessConfigHomeDir string
+ rootlessRuntimeDirOnce sync.Once
+ rootlessRuntimeDir string
+)
+
+// isWriteableOnlyByOwner checks that the specified permission mask allows write
+// access only to the owner.
+func isWriteableOnlyByOwner(perm os.FileMode) bool {
+ return (perm & 0o722) == 0o700
}
// GetConfigHome returns XDG_CONFIG_HOME.
@@ -114,27 +105,78 @@ func GetDataHome() (string, error) {
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
- if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
- return xdgConfigHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
- }
- return filepath.Join(home, ".config"), nil
+ rootlessConfigHomeDirOnce.Do(func() {
+ cfgHomeDir := os.Getenv("XDG_CONFIG_HOME")
+ if cfgHomeDir == "" {
+ home := Get()
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
+ return
+ }
+ tmpDir := filepath.Join(resolvedHome, ".config")
+ _ = os.MkdirAll(tmpDir, 0o700)
+ st, err := os.Stat(tmpDir)
+ if err != nil {
+ rootlessConfigHomeDirError = err
+ return
+ } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() {
+ cfgHomeDir = tmpDir
+ } else {
+ rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir)
+ return
+ }
+ }
+ rootlessConfigHomeDir = cfgHomeDir
+ })
+
+ return rootlessConfigHomeDir, rootlessConfigHomeDirError
}
-// GetCacheHome returns XDG_CACHE_HOME.
-// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
+// GetRuntimeDir returns a directory suitable to store runtime files.
+// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
+// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetCacheHome() (string, error) {
- if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
- return xdgCacheHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
- }
- return filepath.Join(home, ".cache"), nil
+func GetRuntimeDir() (string, error) {
+ var rootlessRuntimeDirError error
+
+ rootlessRuntimeDirOnce.Do(func() {
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+
+ if runtimeDir != "" {
+ rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir)
+ return
+ }
+
+ uid := strconv.Itoa(unshare.GetRootlessUID())
+ if runtimeDir == "" {
+ tmpDir := filepath.Join("/run", "user", uid)
+ if err := os.MkdirAll(tmpDir, 0o700); err != nil {
+ logrus.Debug(err)
+ }
+ st, err := os.Lstat(tmpDir)
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
+ runtimeDir = tmpDir
+ }
+ }
+ if runtimeDir == "" {
+ tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid))
+ if err := os.MkdirAll(tmpDir, 0o700); err != nil {
+ logrus.Debug(err)
+ }
+ st, err := os.Lstat(tmpDir)
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
+ runtimeDir = tmpDir
+ } else {
+ rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir)
+ return
+ }
+ }
+ rootlessRuntimeDir = runtimeDir
+ })
+
+ return rootlessRuntimeDir, rootlessRuntimeDirError
}
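
One consequence of the `sync.Once` memoization introduced above: `GetRuntimeDir` and `GetConfigHome` compute their result once per process, so later environment changes are ignored. A small sketch demonstrating this (illustrative only):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	first, _ := homedir.GetRuntimeDir()

	// The result is memoized with sync.Once, so changing the
	// environment afterwards does not affect later calls.
	os.Setenv("XDG_RUNTIME_DIR", "/tmp/elsewhere")
	second, _ := homedir.GetRuntimeDir()

	fmt.Println(first == second) // true within a single process
}
```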
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
index 4f2615ed3..a76610f90 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -5,6 +5,7 @@ package homedir
import (
"os"
+ "path/filepath"
)
// Key returns the env var name for the user's home dir based on
@@ -17,7 +18,19 @@ func Key() string {
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
- return os.Getenv(Key())
+ home := os.Getenv(Key())
+ if home != "" {
+ return home
+ }
+ home, _ = os.UserHomeDir()
+ return home
+}
+
+// GetConfigHome returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func GetConfigHome() (string, error) {
+ return filepath.Join(Get(), ".config"), nil
}
// GetShortcutString returns the string that is shortcut to user's home directory
@@ -25,3 +38,24 @@ func Get() string {
func GetShortcutString() string {
return "%USERPROFILE%" // be careful while using in format functions
}
+
+// StickRuntimeDirContents is a no-op on Windows
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ return nil, nil
+}
+
+// GetRuntimeDir returns a directory suitable to store runtime files.
+// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
+// directory for the current user.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ data, err := GetDataHome()
+ if err != nil {
+ return "", err
+ }
+ runtimeDir := filepath.Join(data, "containers", "storage")
+ return runtimeDir, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
new file mode 100644
index 000000000..db31da768
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
@@ -0,0 +1,101 @@
+//go:build linux
+
+package idmap
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "runtime"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace
+// for the PID process.
+func CreateIDMappedMount(source, target string, pid int) error {
+ path := fmt.Sprintf("/proc/%d/ns/user", pid)
+ userNsFile, err := os.Open(path)
+ if err != nil {
+ return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
+ }
+ defer userNsFile.Close()
+
+ targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
+ if err != nil {
+ return &os.PathError{Op: "open_tree", Path: source, Err: err}
+ }
+ defer unix.Close(targetDirFd)
+
+ if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
+ &unix.MountAttr{
+ Attr_set: unix.MOUNT_ATTR_IDMAP,
+ Userns_fd: uint64(userNsFile.Fd()),
+ }); err != nil {
+ return &os.PathError{Op: "mount_setattr", Path: source, Err: err}
+ }
+ if err := os.Mkdir(target, 0o700); err != nil && !errors.Is(err, fs.ErrExist) {
+ return err
+ }
+
+ if err := unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+ return &os.PathError{Op: "move_mount", Path: target, Err: err}
+ }
+ return nil
+}
+
+// CreateUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings. It returns the pid of the new process.
+func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
+ var pid uintptr
+ var err syscall.Errno
+
+ if runtime.GOARCH == "s390x" {
+ pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0)
+ } else {
+ pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
+ }
+ if err != 0 {
+ return -1, nil, err
+ }
+ if pid == 0 {
+ _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
+ // just wait for the SIGKILL
+ for {
+ _ = syscall.Pause()
+ }
+ }
+ cleanupFunc := func() {
+ err1 := unix.Kill(int(pid), unix.SIGKILL)
+ if err1 != nil && err1 != syscall.ESRCH {
+ logrus.Warnf("kill process pid: %d with SIGKILL ended with error: %v", int(pid), err1)
+ }
+ if err1 != nil {
+ return
+ }
+ if _, err := unix.Wait4(int(pid), nil, 0, nil); err != nil {
+ logrus.Warnf("wait4 pid: %d ended with error: %v", int(pid), err)
+ }
+ }
+ writeMappings := func(fname string, idmap []idtools.IDMap) error {
+ mappings := ""
+ for _, m := range idmap {
+ mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+ }
+ return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600)
+ }
+ if err := writeMappings("uid_map", uidMaps); err != nil {
+ cleanupFunc()
+ return -1, nil, err
+ }
+ if err := writeMappings("gid_map", gidMaps); err != nil {
+ cleanupFunc()
+ return -1, nil, err
+ }
+
+ return int(pid), cleanupFunc, nil
+}
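
A hedged usage sketch of the new idmap API; the mount paths and the 100000/65536 range below are placeholders, and the calls require root plus a kernel with `mount_setattr`/`MOUNT_ATTR_IDMAP` support (5.12+):

```go
//go:build linux

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idmap"
	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Map container root (0) to host UID/GID 100000, 65536 IDs wide.
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// CreateUsernsProcess forks a helper that only exists to hold the
	// user namespace open; cleanup kills and reaps it.
	pid, cleanup, err := idmap.CreateUsernsProcess(maps, maps)
	if err != nil {
		panic(err)
	}
	defer cleanup()

	if err := idmap.CreateIDMappedMount("/var/lib/src", "/var/lib/dst", pid); err != nil {
		fmt.Println("idmapped mount failed:", err)
	}
}
```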
diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
new file mode 100644
index 000000000..d58137b99
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
@@ -0,0 +1,21 @@
+//go:build !linux
+
+package idmap
+
+import (
+ "fmt"
+
+ "github.com/containers/storage/pkg/idtools"
+)
+
+// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace
+// for the PID process.
+func CreateIDMappedMount(source, target string, pid int) error {
+ return fmt.Errorf("IDMapped mounts are not supported")
+}
+
+// CreateUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings. It returns the pid of the new process.
+func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
+ return -1, nil, fmt.Errorf("IDMapped mounts are not supported")
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 34345d145..248594e93 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -2,16 +2,19 @@ package idtools
import (
"bufio"
+ "errors"
"fmt"
"os"
"os/user"
+ "runtime"
"sort"
"strconv"
"strings"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// IDMap contains a single entry for user namespace range remapping. An array
@@ -35,8 +38,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
- subuidFileName string = "/etc/subuid"
- subgidFileName string = "/etc/subgid"
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+ ContainersOverrideXattr = "user.containers.override_stat"
)
// MkdirAllAs creates a directory (include any along the path) and then modifies
@@ -82,7 +86,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(uidMap) == 1 && uidMap[0].Size == 1 {
uid = uidMap[0].HostID
} else {
- uid, err = toHost(0, uidMap)
+ uid, err = RawToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
@@ -90,7 +94,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(gidMap) == 1 && gidMap[0].Size == 1 {
gid = gidMap[0].HostID
} else {
- gid, err = toHost(0, gidMap)
+ gid, err = RawToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
@@ -98,10 +102,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return uid, gid, nil
}
-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
@@ -111,13 +119,17 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
return contID, nil
}
}
- return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+ return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
}
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
@@ -127,7 +139,7 @@ func toHost(contID int, idMap []IDMap) (int, error) {
return hostID, nil
}
}
- return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+ return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}
// IDPair is a UID and GID pair
@@ -146,19 +158,19 @@ type IDMappings struct {
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIDMappings(username, groupname string) (*IDMappings, error) {
- subuidRanges, err := parseSubuid(username)
+ subuidRanges, err := readSubuid(username)
if err != nil {
return nil, err
}
- subgidRanges, err := parseSubgid(groupname)
+ subgidRanges, err := readSubgid(groupname)
if err != nil {
return nil, err
}
if len(subuidRanges) == 0 {
- return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName)
+ return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName)
}
if len(subgidRanges) == 0 {
- return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName)
+ return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName)
}
return &IDMappings{
@@ -182,31 +194,87 @@ func (i *IDMappings) RootPair() IDPair {
}
// ToHost returns the host UID and GID for the container uid, gid.
-// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ var target IDPair
+
+ target.UID, err = RawToHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ return target, err
+}
+
+var (
+ overflowUIDOnce sync.Once
+ overflowGIDOnce sync.Once
+ overflowUID int
+ overflowGID int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+ overflowUIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+ overflowUID = 65534
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowUID = tmp
+ }
+ }
+ })
+ return overflowUID
+}
+
+// getOverflowGID returns the GID mapped to the overflow user
+func getOverflowGID() int {
+ overflowGIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+ overflowGID = 65534
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowGID = tmp
+ }
+ }
+ })
+ return overflowGID
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
var err error
target := i.RootPair()
if pair.UID != target.UID {
- target.UID, err = toHost(pair.UID, i.uids)
+ target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
- return target, err
+ target.UID = getOverflowUID()
+ logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
}
}
if pair.GID != target.GID {
- target.GID, err = toHost(pair.GID, i.gids)
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ if err != nil {
+ target.GID = getOverflowGID()
+ logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
+ }
}
- return target, err
+ return target, nil
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
+ uid, err := RawToContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
- gid, err := toContainer(pair.GID, i.gids)
+ gid, err := RawToContainer(pair.GID, i.gids)
return uid, gid, err
}
@@ -244,14 +312,6 @@ func createIDMap(subidRanges ranges) []IDMap {
return idMap
}
-func parseSubuid(username string) (ranges, error) {
- return parseSubidFile(subuidFileName, username)
-}
-
-func parseSubgid(username string) (ranges, error) {
- return parseSubidFile(subgidFileName, username)
-}
-
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
@@ -282,16 +342,16 @@ func parseSubidFile(path, username string) (ranges, error) {
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
- return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
startid, err := strconv.Atoi(parts[1])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
@@ -300,13 +360,89 @@ func parseSubidFile(path, username string) (ranges, error) {
}
func checkChownErr(err error, name string, uid, gid int) error {
- if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
- return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
+ var e *os.PathError
+ if errors.As(err, &e) && e.Err == syscall.EINVAL {
+ return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
}
return err
}
+// Stat contains file states that can be overridden with ContainersOverrideXattr.
+type Stat struct {
+ IDs IDPair
+ Mode os.FileMode
+}
+
+// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
+// that can be used as the value for the ContainersOverrideXattr xattr.
+func FormatContainersOverrideXattr(uid, gid, mode int) string {
+ return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
+}
+
+// GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
+func GetContainersOverrideXattr(path string) (Stat, error) {
+ var stat Stat
+ xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
+ if err != nil {
+ return stat, err
+ }
+
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) != 3 {
+ return stat, fmt.Errorf("The number of clons in %s does not equal to 3",
+ ContainersOverrideXattr)
+ }
+
+ value, err := strconv.ParseUint(attrs[0], 10, 32)
+ if err != nil {
+ return stat, fmt.Errorf("Failed to parse UID: %w", err)
+ }
+
+ stat.IDs.UID = int(value)
+
+	value, err = strconv.ParseUint(attrs[1], 10, 32)
+	if err != nil {
+		return stat, fmt.Errorf("failed to parse GID: %w", err)
+ }
+
+ stat.IDs.GID = int(value)
+
+ value, err = strconv.ParseUint(attrs[2], 8, 32)
+ if err != nil {
+ return stat, fmt.Errorf("Failed to parse mode: %w", err)
+ }
+
+ stat.Mode = os.FileMode(value)
+
+ return stat, nil
+}
+
+// SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
+func SetContainersOverrideXattr(path string, stat Stat) error {
+ value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode))
+ return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
+}
+
func SafeChown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode os.FileMode = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = os.FileMode(val)
+ }
+ }
+ }
+ value := Stat{IDPair{uid, gid}, mode}
+ if err = SetContainersOverrideXattr(name, value); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Stat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
@@ -316,6 +452,25 @@ func SafeChown(name string, uid, gid int) error {
}
func SafeLchown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode os.FileMode = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = os.FileMode(val)
+ }
+ }
+ }
+ value := Stat{IDPair{uid, gid}, mode}
+ if err = SetContainersOverrideXattr(name, value); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Lstat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
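
The override xattr added above encodes `uid:gid:0mode` in `user.containers.override_stat`, letting ownership be recorded without an actual chown (used on darwin and in rootless setups). A usage sketch with a hypothetical path (the file must live on an xattr-capable filesystem):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// The encoded form is "uid:gid:0mode", octal mode with a leading zero.
	fmt.Println(idtools.FormatContainersOverrideXattr(1000, 1000, 0o644)) // "1000:1000:0644"

	st := idtools.Stat{IDs: idtools.IDPair{UID: 1000, GID: 1000}, Mode: os.FileMode(0o644)}
	if err := idtools.SetContainersOverrideXattr("/tmp/example", st); err != nil { // hypothetical path
		fmt.Println("set:", err)
		return
	}
	back, err := idtools.GetContainersOverrideXattr("/tmp/example")
	fmt.Println(back, err)
}
```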
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
new file mode 100644
index 000000000..2bd26d0e3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -0,0 +1,86 @@
+//go:build linux && cgo && libsubid
+
+package idtools
+
+import (
+ "errors"
+ "os/user"
+ "unsafe"
+)
+
+/*
+#cgo LDFLAGS: -l subid
+#include <shadow/subid.h>
+#include <stdlib.h>
+#include <stdio.h>
+const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
+struct subid_range get_range(struct subid_range *ranges, int i)
+{
+ shadow_logfd = stderr;
+ return ranges[i];
+}
+
+#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
+# define subid_get_uid_ranges get_subuid_ranges
+# define subid_get_gid_ranges get_subgid_ranges
+#endif
+
+*/
+import "C"
+
+func readSubid(username string, isUser bool) (ranges, error) {
+ var ret ranges
+ uidstr := ""
+
+ if username == "ALL" {
+ return nil, errors.New("username ALL not supported")
+ }
+
+ if u, err := user.Lookup(username); err == nil {
+ uidstr = u.Uid
+ }
+
+ cUsername := C.CString(username)
+ defer C.free(unsafe.Pointer(cUsername))
+
+ cuidstr := C.CString(uidstr)
+ defer C.free(unsafe.Pointer(cuidstr))
+
+ var nRanges C.int
+ var cRanges *C.struct_subid_range
+ if isUser {
+ nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges)
+ }
+ } else {
+ nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges)
+ }
+ }
+ if nRanges < 0 {
+ return nil, errors.New("cannot read subids")
+ }
+ defer C.free(unsafe.Pointer(cRanges))
+
+ for i := 0; i < int(nRanges); i++ {
+ r := C.get_range(cRanges, C.int(i))
+ newRange := subIDRange{
+ Start: int(r.start),
+ Length: int(r.count),
+ }
+ ret = append(ret, newRange)
+ }
+ return ret, nil
+}
+
+func readSubuid(username string) (ranges, error) {
+ return readSubid(username, true)
+}
+
+func readSubgid(username string) (ranges, error) {
+ return readSubid(username, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 9776b2a12..1da7dadbf 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package idtools
@@ -12,8 +12,9 @@ import (
"sync"
"syscall"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/moby/sys/user"
)
var (
@@ -46,12 +47,15 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
+ if !filepath.IsAbs(dirPath) {
+ return fmt.Errorf("path: %s should be absolute", dirPath)
+ }
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
- if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
@@ -87,13 +91,13 @@ func CanAccess(path string, pair IDPair) bool {
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
- if isOwner && (perms&0100 == 0100) {
+ if isOwner && (perms&0o100 == 0o100) {
return true
}
- if isGroup && (perms&0010 == 0010) {
+ if isGroup && (perms&0o010 == 0o010) {
return true
}
- if perms&0001 == 0001 {
+ if perms&0o001 == 0o001 {
return true
}
return false
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
new file mode 100644
index 000000000..e6f5c1ba6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
@@ -0,0 +1,11 @@
+//go:build !linux || !libsubid || !cgo
+
+package idtools
+
+func readSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func readSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
index 16be94f44..ec6a3a046 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package idtools
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
index 1c819a1f9..042d0ea95 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/parser.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -11,22 +11,22 @@ import (
func parseTriple(spec []string) (container, host, size uint32, err error) {
cid, err := strconv.ParseUint(spec[0], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err)
}
hid, err := strconv.ParseUint(spec[1], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err)
}
sz, err := strconv.ParseUint(spec[2], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err)
}
return uint32(cid), uint32(hid), uint32(sz), nil
}
// ParseIDMap parses idmap triples from string.
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
- stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
+ stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
for _, idMapSpec := range mapSpec {
if idMapSpec == "" {
continue
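
ParseIDMap consumes container:host:size triples; the change in this hunk only rewraps parse failures with %w so callers can unwrap them. A sketch of the triple format and of what %w buys, using a hypothetical parseOneTriple helper:

    package main

    import (
        "errors"
        "fmt"
        "strconv"
        "strings"
    )

    func parseOneTriple(spec string) (container, host, size uint32, err error) {
        parts := strings.Split(spec, ":")
        if len(parts) != 3 {
            return 0, 0, 0, fmt.Errorf("malformed triple %q", spec)
        }
        vals := make([]uint32, 3)
        for i, p := range parts {
            v, err := strconv.ParseUint(p, 10, 32)
            if err != nil {
                // %w (not %v) preserves the underlying error for errors.Is/As.
                return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", p, err)
            }
            vals[i] = uint32(v)
        }
        return vals[0], vals[1], vals[2], nil
    }

    func main() {
        c, h, s, _ := parseOneTriple("0:100000:65536")
        fmt.Println(c, h, s) // 0 100000 65536

        _, _, _, err := parseOneTriple("0:x:65536")
        fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true, thanks to %w
    }
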
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
index 9da7975e2..ac27718de 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -2,11 +2,12 @@ package idtools
import (
"fmt"
- "regexp"
"sort"
"strconv"
"strings"
"sync"
+
+ "github.com/containers/storage/pkg/regexp"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
@@ -24,7 +25,7 @@ var (
"usermod": "-%s %d-%d %s",
}
- idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
@@ -37,32 +38,32 @@ var (
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
- return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("adding user %q: %w", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
- return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
- return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
- return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err)
}
return uid, gid, nil
}
@@ -77,67 +78,66 @@ func addUser(userName string) error {
}
})
if userCommand == "" {
- return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
- return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
-
// first, we should verify that ranges weren't automatically created
// by the distro tooling
- ranges, err := parseSubuid(name)
+ ranges, err := readSubuid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subuid range: %v", err)
+ return fmt.Errorf("can't find available subuid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err)
}
}
- ranges, err = parseSubgid(name)
+ ranges, err = readSubgid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subgid range: %v", err)
+ return fmt.Errorf("can't find available subgid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
- ranges, err := parseSubuid("ALL")
+ ranges, err := readSubuid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
- ranges, err := parseSubgid("ALL")
+ ranges, err := readSubgid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
index d98b354cb..e37c4540c 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux
package idtools
@@ -8,5 +8,5 @@ import "fmt"
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
- return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+ return -1, -1, fmt.Errorf("no support for adding users or groups on this OS")
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
index 9703ecbd9..f34462a23 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package idtools
@@ -18,12 +18,12 @@ func resolveBinary(binname string) (string, error) {
if err != nil {
return "", err
}
- //only return no error if the final resolved binary basename
- //matches what was searched for
+ // only return no error if the final resolved binary basename
+ // matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
- return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+ return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index cd12470f9..fd6addd73 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -2,9 +2,9 @@ package ioutils
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
+ "time"
)
// AtomicFileWriterOptions specifies options for creating the atomic file writer.
@@ -14,9 +14,26 @@ type AtomicFileWriterOptions struct {
// storage after it has been written and before it is moved to
// the specified path.
NoSync bool
+ // On successful return from Close() this is set to the mtime of the
+ // newly written file.
+ ModTime time.Time
+ // Specifies whether Commit() must be explicitly called to write state
+ // to the destination. This allows an application to preserve the original
+ // file when an error occurs during processing (and not just during write)
+ // The default is false, which will auto-commit on Close
+ ExplicitCommit bool
}
-var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{}
+type CommittableWriter interface {
+ io.WriteCloser
+
+ // Commit closes the temporary file associated with this writer, and
+ // provided no errors (during commit or previously during write operations),
+ // will publish the completed file under the intended destination.
+ Commit() error
+}
+
+var defaultWriterOptions = AtomicFileWriterOptions{}
// SetDefaultOptions overrides the default options used when creating an
// atomic file writer.
@@ -24,11 +41,21 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
defaultWriterOptions = opts
}
-// NewAtomicFileWriterWithOpts returns WriteCloser so that writing to it writes to a
-// temporary file and closing it atomically changes the temporary file to
-// destination path. Writing and closing concurrently is not allowed.
-func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
- f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+// NewAtomicFileWriterWithOpts returns a CommittableWriter so that writing to it
+// writes to a temporary file, which can later be committed to a destination path,
+// either by Closing in the case of auto-commit, or manually calling Commit if the
+// ExplicitCommit option is enabled. Writing and closing concurrently is not
+// allowed.
+func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (CommittableWriter, error) {
+ return newAtomicFileWriter(filename, perm, opts)
+}
+
+// newAtomicFileWriter returns a CommittableWriter so that writing to it writes to
+// a temporary file, which can later be committed to a destination path, either by
+// Closing in the case of auto-commit, or manually calling Commit if the
+// ExplicitCommit option is enabled. Writing and closing concurrently is not allowed.
+func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) {
+ f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
@@ -40,43 +67,56 @@ func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *Atomic
return nil, err
}
return &atomicFileWriter{
- f: f,
- fn: abspath,
- perm: perm,
- noSync: opts.NoSync,
+ f: f,
+ fn: abspath,
+ perm: perm,
+ noSync: opts.NoSync,
+ explicitCommit: opts.ExplicitCommit,
}, nil
}
-// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
-// temporary file and closing it atomically changes the temporary file to
-// destination path. Writing and closing concurrently is not allowed.
-func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+// NewAtomicFileWriter returns a CommittableWriter, with auto-commit enabled.
+// Writing to it writes to a temporary file and closing it atomically changes the
+// temporary file to destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (CommittableWriter, error) {
return NewAtomicFileWriterWithOpts(filename, perm, nil)
}
-// AtomicWriteFile atomically writes data to a file named by filename.
+// AtomicWriteFileWithOpts atomically writes data to a file named by filename,
+// honoring the provided AtomicFileWriterOptions.
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
- f, err := NewAtomicFileWriter(filename, perm)
+func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
+ f, err := newAtomicFileWriter(filename, perm, opts)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
- f.(*atomicFileWriter).writeErr = err
+ f.writeErr = err
}
if err1 := f.Close(); err == nil {
err = err1
}
+
+ if opts != nil {
+ opts.ModTime = f.modTime
+ }
+
return err
}
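+// AtomicWriteFile atomically writes data to a file named by filename.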
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ return AtomicWriteFileWithOpts(filename, data, perm, nil)
+}
+
type atomicFileWriter struct {
- f *os.File
- fn string
- writeErr error
- perm os.FileMode
- noSync bool
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+ noSync bool
+ modTime time.Time
+ closed bool
+ explicitCommit bool
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
@@ -87,27 +127,76 @@ func (w *atomicFileWriter) Write(dt []byte) (int, error) {
return n, err
}
-func (w *atomicFileWriter) Close() (retErr error) {
+func (w *atomicFileWriter) closeTempFile() error {
+ if w.closed {
+ return nil
+ }
+
+ w.closed = true
+ return w.f.Close()
+}
+
+func (w *atomicFileWriter) Close() error {
+ return w.complete(!w.explicitCommit)
+}
+
+func (w *atomicFileWriter) Commit() error {
+ return w.complete(true)
+}
+
+func (w *atomicFileWriter) complete(commit bool) (retErr error) {
+ if w == nil || w.closed {
+ return nil
+ }
+
defer func() {
+ err := w.closeTempFile()
if retErr != nil || w.writeErr != nil {
os.Remove(w.f.Name())
}
- }()
- if !w.noSync {
- if err := fdatasync(w.f); err != nil {
- w.f.Close()
- return err
+ if retErr == nil {
+ retErr = err
}
+ }()
+
+ if commit {
+ return w.commitState()
}
- if err := w.f.Close(); err != nil {
+
+ return nil
+}
+
+func (w *atomicFileWriter) commitState() error {
+ // Perform a data only sync (fdatasync()) if supported
+ if err := w.postDataWrittenSync(); err != nil {
return err
}
- if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+
+ // Capture fstat before closing the fd
+ info, err := w.f.Stat()
+ if err != nil {
return err
}
+ w.modTime = info.ModTime()
+
+ if err := w.f.Chmod(w.perm); err != nil {
+ return err
+ }
+
+ // Perform full sync on platforms that need it
+ if err := w.preRenameSync(); err != nil {
+ return err
+ }
+
+ // Some platforms require closing before rename (Windows)
+ if err := w.closeTempFile(); err != nil {
+ return err
+ }
+
if w.writeErr == nil {
return os.Rename(w.f.Name(), w.fn)
}
+
return nil
}
@@ -124,7 +213,7 @@ type AtomicWriteSet struct {
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
- td, err := ioutil.TempDir(tmpDir, "write-set-")
+ td, err := os.MkdirTemp(tmpDir, "write-set-")
if err != nil {
return nil, err
}
@@ -159,7 +248,7 @@ func (w syncFileCloser) Close() error {
if !defaultWriterOptions.NoSync {
return w.File.Close()
}
- err := fdatasync(w.File)
+ err := dataOrFullSync(w.File)
if err1 := w.File.Close(); err == nil {
err = err1
}
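
The new ExplicitCommit mode decouples closing the writer from publishing the file: the destination is replaced only when Commit() runs, so any error between writing and committing leaves the original file intact. A usage sketch under those semantics (rewriteConfig and render are hypothetical):

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/ioutils"
    )

    func rewriteConfig(path string, render func() ([]byte, error)) error {
        opts := &ioutils.AtomicFileWriterOptions{ExplicitCommit: true}
        w, err := ioutils.NewAtomicFileWriterWithOpts(path, 0o600, opts)
        if err != nil {
            return err
        }
        // If Commit is never reached, Close leaves the destination untouched.
        defer w.Close()

        data, err := render()
        if err != nil {
            return err // original file at path is preserved
        }
        if _, err := w.Write(data); err != nil {
            return err
        }
        return w.Commit() // atomically renames the temp file onto path
    }

    func main() {
        err := rewriteConfig("/tmp/example.conf", func() ([]byte, error) {
            return []byte("key = value\n"), nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }
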
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
index 0da78a063..10ed48cfd 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
@@ -6,6 +6,18 @@ import (
"golang.org/x/sys/unix"
)
-func fdatasync(f *os.File) error {
+func dataOrFullSync(f *os.File) error {
return unix.Fdatasync(int(f.Fd()))
}
+
+func (w *atomicFileWriter) postDataWrittenSync() error {
+ if w.noSync {
+ return nil
+ }
+ return unix.Fdatasync(int(w.f.Fd()))
+}
+
+func (w *atomicFileWriter) preRenameSync() error {
+ // On Linux data can be reliably flushed to media without metadata, so defer
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
new file mode 100644
index 000000000..2ccdc3108
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
@@ -0,0 +1,25 @@
+//go:build !linux
+
+package ioutils
+
+import (
+ "os"
+)
+
+func dataOrFullSync(f *os.File) error {
+ return f.Sync()
+}
+
+func (w *atomicFileWriter) postDataWrittenSync() error {
+ // many platforms (Mac, Windows) require a full sync to reliably flush to media
+ return nil
+}
+
+func (w *atomicFileWriter) preRenameSync() error {
+ if w.noSync {
+ return nil
+ }
+
+ // fsync() on non-Linux Unix, FlushFileBuffers (Windows), F_FULLFSYNC (Mac)
+ return w.f.Sync()
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go
deleted file mode 100644
index 79a094035..000000000
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !linux
-
-package ioutils
-
-import (
- "os"
-)
-
-func fdatasync(f *os.File) error {
- return f.Sync()
-}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
index 63f3c07f4..146e1a5ff 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
@@ -1,11 +1,10 @@
package ioutils
import (
+ "context"
"crypto/sha256"
"encoding/hex"
"io"
-
- "golang.org/x/net/context"
)
type readCloserWrapper struct {
@@ -17,8 +16,25 @@ func (r *readCloserWrapper) Close() error {
return r.closer()
}
+type readWriteToCloserWrapper struct {
+ io.Reader
+ io.WriterTo
+ closer func() error
+}
+
+func (r *readWriteToCloserWrapper) Close() error {
+ return r.closer()
+}
+
// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ if wt, ok := r.(io.WriterTo); ok {
+ return &readWriteToCloserWrapper{
+ Reader: r,
+ WriterTo: wt,
+ closer: closer,
+ }
+ }
return &readCloserWrapper{
Reader: r,
closer: closer,
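
Forwarding io.WriterTo here matters because io.Copy type-asserts its source for WriterTo and, when present, skips its own intermediate staging buffer. A small demonstration of the difference (plain Go, not the vendored wrapper):

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        src := strings.NewReader("payload") // *strings.Reader implements io.WriterTo

        var wrapped io.Reader = struct{ io.Reader }{src} // WriterTo hidden
        if _, ok := wrapped.(io.WriterTo); !ok {
            fmt.Println("plain wrapper: io.Copy must use a staging buffer")
        }

        var preserved io.Reader = struct {
            io.Reader
            io.WriterTo
        }{src, src} // WriterTo forwarded, as in the new readWriteToCloserWrapper
        if _, ok := preserved.(io.WriterTo); ok {
            fmt.Println("forwarding wrapper: io.Copy calls WriteTo directly")
        }

        io.Copy(os.Stdout, preserved) // prints "payload" via WriteTo
        fmt.Println()
    }
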
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
index 1539ad21b..257b064c5 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -1,10 +1,10 @@
-// +build !windows
+//go:build !windows
package ioutils
-import "io/ioutil"
+import "os"
-// TempDir on Unix systems is equivalent to ioutil.TempDir.
+// TempDir on Unix systems is equivalent to os.MkdirTemp.
func TempDir(dir, prefix string) (string, error) {
- return ioutil.TempDir(dir, prefix)
+ return os.MkdirTemp(dir, prefix)
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
index c719c120b..79837fb33 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -1,16 +1,16 @@
-// +build windows
+//go:build windows
package ioutils
import (
- "io/ioutil"
+ "os"
"github.com/containers/storage/pkg/longpath"
)
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
- tempDir, err := ioutil.TempDir(dir, prefix)
+ tempDir, err := os.MkdirTemp(dir, prefix)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go
new file mode 100644
index 000000000..93fb1fea8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go
@@ -0,0 +1,82 @@
+package lockfile
+
+import (
+ "bytes"
+ cryptorand "crypto/rand"
+ "encoding/binary"
+ "os"
+ "sync/atomic"
+ "time"
+)
+
+// LastWrite is an opaque identifier of the last write to some *LockFile.
+// It can be used by users of a *LockFile to determine if the lock indicates changes
+// since the last check.
+//
+// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
+type LastWrite struct {
+ // Never modify fields of a LastWrite object; it has value semantics.
+ state []byte // Contents of the lock file.
+}
+
+var lastWriterIDCounter uint64 // Private state for newLastWriterID
+
+const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
+// newLastWrite returns a new "last write" ID.
+// The value must be different on every call, and also differ from values
+// generated by other processes.
+func newLastWrite() LastWrite {
+ // The ID is (PID, time, per-process counter, random)
+ // PID + time represents both a unique process across reboots,
+ // and a specific time within the process; the per-process counter
+ // is an extra safeguard for in-process concurrency.
+ // The random part disambiguates across process namespaces
+ // (where PID values might collide), serves as a general-purpose
+ // extra safety, _and_ is used to pad the output to lastWriterIDSize,
+ // because other versions of this code exist and they don't work
+ // efficiently if the size of the value changes.
+ pid := os.Getpid()
+ tm := time.Now().UnixNano()
+ counter := atomic.AddUint64(&lastWriterIDCounter, 1)
+
+ res := make([]byte, lastWriterIDSize)
+ binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
+ binary.LittleEndian.PutUint64(res[8:16], counter)
+ binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
+ if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
+ panic(err) // This shouldn't happen
+ }
+
+ return LastWrite{
+ state: res,
+ }
+}
+
+// serialize returns bytes to write to the lock file to represent the specified write.
+func (lw LastWrite) serialize() []byte {
+ if lw.state == nil {
+ panic("LastWrite.serialize on an uninitialized object")
+ }
+ return lw.state
+}
+
+// equals returns true if lw matches other.
+func (lw LastWrite) equals(other LastWrite) bool {
+ if lw.state == nil {
+ panic("LastWrite.equals on an uninitialized object")
+ }
+ if other.state == nil {
+ panic("LastWrite.equals with an uninitialized counterparty")
+ }
+ return bytes.Equal(lw.state, other.state)
+}
+
+// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
+func newLastWriteFromData(serialized []byte) LastWrite {
+ if serialized == nil {
+ panic("newLastWriteFromData with nil data")
+ }
+ return LastWrite{
+ state: serialized,
+ }
+}
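
To make the token layout concrete: 8 bytes of UnixNano time, 8 bytes of per-process counter, 4 bytes of PID, and 44 random bytes of padding/entropy, with change detection reduced to byte equality. An illustrative reconstruction mirroring newLastWrite (not the vendored code):

    package main

    import (
        "bytes"
        cryptorand "crypto/rand"
        "encoding/binary"
        "fmt"
        "os"
        "sync/atomic"
        "time"
    )

    var counter uint64

    const tokenSize = 64

    func newToken() []byte {
        res := make([]byte, tokenSize)
        binary.LittleEndian.PutUint64(res[0:8], uint64(time.Now().UnixNano()))
        binary.LittleEndian.PutUint64(res[8:16], atomic.AddUint64(&counter, 1))
        binary.LittleEndian.PutUint32(res[16:20], uint32(os.Getpid()))
        if _, err := cryptorand.Read(res[20:]); err != nil {
            panic(err) // crypto/rand failure is unrecoverable here
        }
        return res
    }

    func main() {
        a, b := newToken(), newToken()
        // Change detection is just byte equality of the opaque tokens.
        fmt.Println(bytes.Equal(a, a), bytes.Equal(a, b)) // true false
    }
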
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
index 6a00141c3..52f6c0a62 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
@@ -1,16 +1,18 @@
package lockfile
import (
+ "fmt"
+ "os"
"path/filepath"
"sync"
"time"
-
- "github.com/pkg/errors"
)
// A Locker represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
+//
+// Deprecated: Refer directly to *LockFile, the provided implementation, instead.
type Locker interface {
// Acquire a writer lock.
// The default unix implementation panics if:
@@ -18,10 +20,6 @@ type Locker interface {
// - tried to lock a read-only lock-file
Lock()
- // Acquire a writer lock recursively, allowing for recursive acquisitions
- // within the same process space.
- RecursiveLock()
-
// Unlock the lock.
// The default unix implementation panics if:
// - unlocking an unlocked lock
@@ -33,10 +31,13 @@ type Locker interface {
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
+ //
+ // Deprecated: Use *LockFile.RecordWrite.
Touch() error
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
+ // Deprecated: Use *LockFile.ModifiedSince.
Modified() (bool, error)
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
@@ -45,63 +46,419 @@ type Locker interface {
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
- // Locked() checks if lock is locked for writing by a thread in this process
- Locked() bool
+ // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking.
+ // It might do nothing at all, or it may panic if the caller is not the owner of this lock.
+ AssertLocked()
+
+ // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking.
+ // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing.
+ AssertLockedForWriting()
+}
+
+type lockType byte
+
+const (
+ readLock lockType = iota
+ writeLock
+)
+
+// LockFile represents a file lock where the file is used to cache an
+// identifier of the last party that made changes to whatever's being protected
+// by the lock.
+//
+// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
+type LockFile struct {
+ // The following fields are only set when constructing *LockFile, and must never be modified afterwards.
+ // They are safe to access without any other locking.
+ file string
+ ro bool
+
+ // rwMutex serializes concurrent reader-writer acquisitions in the same process space
+ rwMutex *sync.RWMutex
+ // stateMutex is used to synchronize concurrent accesses to the state below
+ stateMutex *sync.Mutex
+ counter int64
+ lw LastWrite // A global value valid as of the last .Touch() or .Modified()
+ lockType lockType
+ locked bool
+ // The following fields are only modified on transitions between counter == 0 / counter != 0.
+ // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
+ // In other cases, they need to be protected using stateMutex.
+ fd fileHandle
}
var (
- lockfiles map[string]Locker
- lockfilesLock sync.Mutex
+ lockFiles map[string]*LockFile
+ lockFilesLock sync.Mutex
)
+// GetLockFile opens a read-write lock file, creating it if necessary. The
+// *LockFile object may already be locked if the path has already been requested
+// by the current process.
+func GetLockFile(path string) (*LockFile, error) {
+ return getLockfile(path, false)
+}
+
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
+//
+// Deprecated: Use GetLockFile
func GetLockfile(path string) (Locker, error) {
- return getLockfile(path, false)
+ return GetLockFile(path)
+}
+
+// GetROLockFile opens a read-only lock file, creating it if necessary. The
+// *LockFile object may already be locked if the path has already been requested
+// by the current process.
+func GetROLockFile(path string) (*LockFile, error) {
+ return getLockfile(path, true)
}
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
+//
+// Deprecated: Use GetROLockFile
func GetROLockfile(path string) (Locker, error) {
- return getLockfile(path, true)
+ return GetROLockFile(path)
+}
+
+// Lock locks the lockfile as a writer. Panics if the lock is a read-only one.
+func (l *LockFile) Lock() {
+ if l.ro {
+ panic("can't take write lock on read-only lock file")
+ }
+ l.lock(writeLock)
+}
+
+// RLock locks the lockfile as a reader.
+func (l *LockFile) RLock() {
+ l.lock(readLock)
+}
+
+// TryLock attempts to lock the lockfile as a writer. Panics if the lock is a read-only one.
+func (l *LockFile) TryLock() error {
+ if l.ro {
+ panic("can't take write lock on read-only lock file")
+ }
+ return l.tryLock(writeLock)
+}
+
+// TryRLock attempts to lock the lockfile as a reader.
+func (l *LockFile) TryRLock() error {
+ return l.tryLock(readLock)
+}
+
+// Unlock unlocks the lockfile.
+func (l *LockFile) Unlock() {
+ l.stateMutex.Lock()
+ if !l.locked {
+ // Panic when unlocking an unlocked lock. That's a violation
+ // of the lock semantics, and panicking makes the misuse visible.
+ panic("calling Unlock on unlocked lock")
+ }
+ l.counter--
+ if l.counter < 0 {
+ // Panic when the counter is negative. There is no way we can
+ // recover from a corrupted lock and we need to protect the
+ // storage from corruption.
+ panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
+ }
+ if l.counter == 0 {
+ // We should only release the lock when the counter is 0 to
+ // avoid releasing read-locks too early; a given process may
+ // acquire a read lock multiple times.
+ l.locked = false
+ // Close the file descriptor on the last unlock, releasing the
+ // file lock.
+ unlockAndCloseHandle(l.fd)
+ }
+ if l.lockType == readLock {
+ l.rwMutex.RUnlock()
+ } else {
+ l.rwMutex.Unlock()
+ }
+ l.stateMutex.Unlock()
+}
+
+func (l *LockFile) AssertLocked() {
+ // DO NOT provide a variant that returns the value of l.locked.
+ //
+ // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
+ // we can’t tell the difference.
+ //
+ // Hence, this “AssertLocked” method, which exists only for sanity checks.
+
+ // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
+ // with no possible writers.
+ // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
+ // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
+ if !l.locked {
+ panic("internal error: lock is not held by the expected owner")
+ }
+}
+
+func (l *LockFile) AssertLockedForWriting() {
+ // DO NOT provide a variant that returns the current lock state.
+ //
+ // The same caveats as for AssertLocked apply equally.
+
+ l.AssertLocked()
+ // Like AssertLocked, don’t even bother with l.stateMutex.
+ if l.lockType == readLock {
+ panic("internal error: lock is not held for writing")
+ }
+}
+
+// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
+// and returns the one to record instead.
+//
+// If ModifiedSince reports no modification, the previous LastWrite value
+// is still valid and can continue to be used.
+//
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should fail and keep using the previously-recorded LastWrite value,
+// so that it continues failing until the situation is resolved. Similarly,
+// it should only update the recorded LastWrite value after processing the update:
+//
+// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
+// if err != nil { /* fail */ }
+// state.lastWrite = lw2
+// if modified {
+// if err := reload(); err != nil { /* fail */ }
+// state.lastWrite = lw2
+// }
+//
+// The caller must hold the lock (for reading or writing).
+func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
+ l.AssertLocked()
+ currentLW, err := l.GetLastWrite()
+ if err != nil {
+ return LastWrite{}, false, err
+ }
+ modified := !previous.equals(currentLW)
+ return currentLW, modified, nil
}
-// getLockfile returns a Locker object, possibly (depending on the platform)
+// Modified indicates if the lockfile has been updated since the last time it
+// was loaded.
+// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
+// Callers cannot, in general, rely on this, because that might have happened for some other
+// owner of the same *LockFile who created it previously.
+//
+// Deprecated: Use *LockFile.ModifiedSince.
+func (l *LockFile) Modified() (bool, error) {
+ l.stateMutex.Lock()
+ if !l.locked {
+ panic("attempted to check last-writer in lockfile without locking it first")
+ }
+ defer l.stateMutex.Unlock()
+ oldLW := l.lw
+ // Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
+ currentLW, modified, err := l.ModifiedSince(oldLW)
+ if err != nil {
+ return true, err
+ }
+ l.lw = currentLW
+ return modified, nil
+}
+
+// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
+//
+// Deprecated: Use *LockFile.RecordWrite.
+func (l *LockFile) Touch() error {
+ lw, err := l.RecordWrite()
+ if err != nil {
+ return err
+ }
+ l.stateMutex.Lock()
+ if !l.locked || (l.lockType == readLock) {
+ panic("attempted to update last-writer in lockfile without the write lock")
+ }
+ defer l.stateMutex.Unlock()
+ l.lw = lw
+ return nil
+}
+
+// IsReadWrite indicates if the lock file is a read-write lock.
+func (l *LockFile) IsReadWrite() bool {
+ return !l.ro
+}
+
+// getLockfile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
-// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
-// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
-func getLockfile(path string, ro bool) (Locker, error) {
- lockfilesLock.Lock()
- defer lockfilesLock.Unlock()
- if lockfiles == nil {
- lockfiles = make(map[string]Locker)
+func getLockfile(path string, ro bool) (*LockFile, error) {
+ lockFilesLock.Lock()
+ defer lockFilesLock.Unlock()
+ if lockFiles == nil {
+ lockFiles = make(map[string]*LockFile)
}
cleanPath, err := filepath.Abs(path)
if err != nil {
- return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+ return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
- if locker, ok := lockfiles[cleanPath]; ok {
- if ro && locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
+ if lockFile, ok := lockFiles[cleanPath]; ok {
+ if ro && lockFile.IsReadWrite() {
+ return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
- if !ro && !locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
+ if !ro && !lockFile.IsReadWrite() {
+ return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
- return locker, nil
+ return lockFile, nil
}
- locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker
+ lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile
if err != nil {
return nil, err
}
- lockfiles[cleanPath] = locker
- return locker, nil
+ lockFiles[cleanPath] = lockFile
+ return lockFile, nil
+}
+
+// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockFileForPath(path string, ro bool) (*LockFile, error) {
+ // Check if we can open the lock.
+ fd, err := openLock(path, ro)
+ if err != nil {
+ return nil, err
+ }
+ unlockAndCloseHandle(fd)
+
+ lType := writeLock
+ if ro {
+ lType = readLock
+ }
+
+ return &LockFile{
+ file: path,
+ ro: ro,
+
+ rwMutex: &sync.RWMutex{},
+ stateMutex: &sync.Mutex{},
+ lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
+ lockType: lType,
+ locked: false,
+ }, nil
+}
+
+// openLock opens the file at path and returns the corresponding file
+// descriptor. The path is opened either read-only or read-write,
+// depending on the value of ro argument.
+//
+// openLock will create the file and its parent directories,
+// if necessary.
+func openLock(path string, ro bool) (fd fileHandle, err error) {
+ flags := os.O_CREATE
+ if ro {
+ flags |= os.O_RDONLY
+ } else {
+ flags |= os.O_RDWR
+ }
+ fd, err = openHandle(path, flags)
+ if err == nil {
+ return fd, nil
+ }
+
+ // the directory of the lockfile seems to be removed, try to create it
+ if os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
+ return fd, fmt.Errorf("creating lock file directory: %w", err)
+ }
+
+ return openLock(path, ro)
+ }
+
+ return fd, &os.PathError{Op: "open", Path: path, Err: err}
+}
+
+// lock locks the lockfile via syscall based on the specified type and
+// command.
+func (l *LockFile) lock(lType lockType) {
+ if lType == readLock {
+ l.rwMutex.RLock()
+ } else {
+ l.rwMutex.Lock()
+ }
+ l.stateMutex.Lock()
+ defer l.stateMutex.Unlock()
+ if l.counter == 0 {
+ // If we're the first reference on the lock, we need to open the file again.
+ fd, err := openLock(l.file, l.ro)
+ if err != nil {
+ panic(err)
+ }
+ l.fd = fd
+
+ // Optimization: only use the (expensive) syscall when
+ // the counter is 0. In this case, we're either the first
+ // reader lock or a writer lock.
+ if err := lockHandle(l.fd, lType, false); err != nil {
+ panic(err)
+ }
+ }
+ l.lockType = lType
+ l.locked = true
+ l.counter++
+}
+
+// tryLock attempts to take the lock via syscall without blocking, based on
+// the specified type.
+func (l *LockFile) tryLock(lType lockType) error {
+ var success bool
+ var rwMutexUnlocker func()
+ if lType == readLock {
+ success = l.rwMutex.TryRLock()
+ rwMutexUnlocker = l.rwMutex.RUnlock
+ } else {
+ success = l.rwMutex.TryLock()
+ rwMutexUnlocker = l.rwMutex.Unlock
+ }
+ if !success {
+ return fmt.Errorf("resource temporarily unavailable")
+ }
+ l.stateMutex.Lock()
+ defer l.stateMutex.Unlock()
+ if l.counter == 0 {
+ // If we're the first reference on the lock, we need to open the file again.
+ fd, err := openLock(l.file, l.ro)
+ if err != nil {
+ rwMutexUnlocker()
+ return err
+ }
+ l.fd = fd
+
+ // Optimization: only use the (expensive) syscall when
+ // the counter is 0. In this case, we're either the first
+ // reader lock or a writer lock.
+ if err = lockHandle(l.fd, lType, true); err != nil {
+ closeHandle(fd)
+ rwMutexUnlocker()
+ return err
+ }
+ }
+ l.lockType = lType
+ l.locked = true
+ l.counter++
+ return nil
}
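
Putting the reworked API together: a consumer snapshots GetLastWrite once under the lock, then polls ModifiedSince, exactly as the doc comment above prescribes. A compilable usage sketch (the cache type and reload callback are hypothetical):

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/lockfile"
    )

    type cache struct {
        lock      *lockfile.LockFile
        lastWrite lockfile.LastWrite
    }

    func (c *cache) refreshIfChanged(reload func() error) error {
        c.lock.RLock()
        defer c.lock.Unlock()

        lw, modified, err := c.lock.ModifiedSince(c.lastWrite)
        if err != nil {
            return err // keep the old lastWrite so we retry next time
        }
        if modified {
            if err := reload(); err != nil {
                return err
            }
        }
        c.lastWrite = lw
        return nil
    }

    func main() {
        l, err := lockfile.GetLockFile("/tmp/example.lock")
        if err != nil {
            log.Fatal(err)
        }
        c := &cache{lock: l}

        // Initialize the baseline before first use, holding the lock.
        l.RLock()
        c.lastWrite, err = l.GetLastWrite()
        l.Unlock()
        if err != nil {
            log.Fatal(err)
        }

        if err := c.refreshIfChanged(func() error { return nil }); err != nil {
            log.Fatal(err)
        }
    }
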
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index 0a92da2c0..885f2f88a 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -1,250 +1,106 @@
-// +build linux solaris darwin freebsd
+//go:build !windows
package lockfile
import (
- "fmt"
- "os"
- "sync"
"time"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
-type lockfile struct {
- // rwMutex serializes concurrent reader-writer acquisitions in the same process space
- rwMutex *sync.RWMutex
- // stateMutex is used to synchronize concurrent accesses to the state below
- stateMutex *sync.Mutex
- counter int64
- file string
- fd uintptr
- lw string
- locktype int16
- locked bool
- ro bool
- recursive bool
-}
+type fileHandle uintptr
-// openLock opens the file at path and returns the corresponding file
-// descriptor. Note that the path is opened read-only when ro is set. If ro
-// is unset, openLock will open the path read-write and create the file if
-// necessary.
-func openLock(path string, ro bool) (int, error) {
- if ro {
- return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
+// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
+// This is typically called before (_not after_) loading the state when initializing a consumer
+// of the data protected by the lock.
+// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
+//
+// The caller must hold the lock (for reading or writing).
+func (l *LockFile) GetLastWrite() (LastWrite, error) {
+ l.AssertLocked()
+ contents := make([]byte, lastWriterIDSize)
+ n, err := unix.Pread(int(l.fd), contents, 0)
+ if err != nil {
+ return LastWrite{}, err
}
- return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH)
+ // It is important to handle the partial read case, because
+ // the initial size of the lock file is zero, which is a valid
+ // state (no writes yet)
+ contents = contents[:n]
+ return newLastWriteFromData(contents), nil
}
-// createLockerForPath returns a Locker object, possibly (depending on the platform)
-// working inter-process and associated with the specified path.
+// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
-// This function will be called at most once for each path value within a single process.
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should keep using the previously-recorded LastWrite value,
+// and possibly detecting its own modification as an external one:
//
-// If ro, the lock is a read-write lock and the returned Locker should correspond to the
-// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
-// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+// lw, err := state.lock.RecordWrite()
+// if err != nil { /* fail */ }
+// state.lastWrite = lw
//
-// WARNING:
-// - The lock may or MAY NOT be inter-process.
-// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
-// - Even if ro, the lock MAY be exclusive.
-func createLockerForPath(path string, ro bool) (Locker, error) {
- // Check if we can open the lock.
- fd, err := openLock(path, ro)
+// The caller must hold the lock for writing.
+func (l *LockFile) RecordWrite() (LastWrite, error) {
+ l.AssertLockedForWriting()
+ lw := newLastWrite()
+ lockContents := lw.serialize()
+ n, err := unix.Pwrite(int(l.fd), lockContents, 0)
if err != nil {
- return nil, errors.Wrapf(err, "error opening %q", path)
- }
- unix.Close(fd)
-
- locktype := unix.F_WRLCK
- if ro {
- locktype = unix.F_RDLCK
- }
- return &lockfile{
- stateMutex: &sync.Mutex{},
- rwMutex: &sync.RWMutex{},
- file: path,
- lw: stringid.GenerateRandomID(),
- locktype: int16(locktype),
- locked: false,
- ro: ro}, nil
-}
-
-// lock locks the lockfile via FCTNL(2) based on the specified type and
-// command.
-func (l *lockfile) lock(lType int16, recursive bool) {
- lk := unix.Flock_t{
- Type: lType,
- Whence: int16(os.SEEK_SET),
- Start: 0,
- Len: 0,
- }
- switch lType {
- case unix.F_RDLCK:
- l.rwMutex.RLock()
- case unix.F_WRLCK:
- if recursive {
- // NOTE: that's okay as recursive is only set in RecursiveLock(), so
- // there's no need to protect against hypothetical RDLCK cases.
- l.rwMutex.RLock()
- } else {
- l.rwMutex.Lock()
- }
- default:
- panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
- }
- l.stateMutex.Lock()
- defer l.stateMutex.Unlock()
- if l.counter == 0 {
- // If we're the first reference on the lock, we need to open the file again.
- fd, err := openLock(l.file, l.ro)
- if err != nil {
- panic(fmt.Sprintf("error opening %q: %v", l.file, err))
- }
- l.fd = uintptr(fd)
-
- // Optimization: only use the (expensive) fcntl syscall when
- // the counter is 0. In this case, we're either the first
- // reader lock or a writer lock.
- for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
- time.Sleep(10 * time.Millisecond)
- }
+ return LastWrite{}, err
}
- l.locktype = lType
- l.locked = true
- l.recursive = recursive
- l.counter++
-}
-
-// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
-func (l *lockfile) Lock() {
- if l.ro {
- panic("can't take write lock on read-only lock file")
- } else {
- l.lock(unix.F_WRLCK, false)
- }
-}
-
-// RecursiveLock locks the lockfile as a writer but allows for recursive
-// acquisitions within the same process space. Note that RLock() will be called
-// if it's a lockTypReader lock.
-func (l *lockfile) RecursiveLock() {
- if l.ro {
- l.RLock()
- } else {
- l.lock(unix.F_WRLCK, true)
+ if n != len(lockContents) {
+ return LastWrite{}, unix.ENOSPC
}
+ return lw, nil
}
-// LockRead locks the lockfile as a reader.
-func (l *lockfile) RLock() {
- l.lock(unix.F_RDLCK, false)
-}
-
-// Unlock unlocks the lockfile.
-func (l *lockfile) Unlock() {
- l.stateMutex.Lock()
- if l.locked == false {
- // Panic when unlocking an unlocked lock. That's a violation
- // of the lock semantics and will reveal such.
- panic("calling Unlock on unlocked lock")
- }
- l.counter--
- if l.counter < 0 {
- // Panic when the counter is negative. There is no way we can
- // recover from a corrupted lock and we need to protect the
- // storage from corruption.
- panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
- }
- if l.counter == 0 {
- // We should only release the lock when the counter is 0 to
- // avoid releasing read-locks too early; a given process may
- // acquire a read lock multiple times.
- l.locked = false
- // Close the file descriptor on the last unlock, releasing the
- // file lock.
- unix.Close(int(l.fd))
- }
- if l.locktype == unix.F_RDLCK || l.recursive {
- l.rwMutex.RUnlock()
- } else {
- l.rwMutex.Unlock()
+// TouchedSince indicates if the lock file has been touched since the specified time
+func (l *LockFile) TouchedSince(when time.Time) bool {
+ st, err := system.Fstat(int(l.fd))
+ if err != nil {
+ return true
}
- l.stateMutex.Unlock()
-}
-
-// Locked checks if lockfile is locked for writing by a thread in this process.
-func (l *lockfile) Locked() bool {
- l.stateMutex.Lock()
- defer l.stateMutex.Unlock()
- return l.locked && (l.locktype == unix.F_WRLCK)
+ mtim := st.Mtim()
+ touched := time.Unix(mtim.Unix())
+ return when.Before(touched)
}
-// Touch updates the lock file with the UID of the user.
-func (l *lockfile) Touch() error {
- l.stateMutex.Lock()
- if !l.locked || (l.locktype != unix.F_WRLCK) {
- panic("attempted to update last-writer in lockfile without the write lock")
- }
- defer l.stateMutex.Unlock()
- l.lw = stringid.GenerateRandomID()
- id := []byte(l.lw)
- _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
- if err != nil {
- return err
- }
- n, err := unix.Write(int(l.fd), id)
- if err != nil {
- return err
- }
- if n != len(id) {
- return unix.ENOSPC
- }
- return nil
+func openHandle(path string, mode int) (fileHandle, error) {
+ mode |= unix.O_CLOEXEC
+ fd, err := unix.Open(path, mode, 0o644)
+ return fileHandle(fd), err
}
-// Modified indicates if the lockfile has been updated since the last time it
-// was loaded.
-func (l *lockfile) Modified() (bool, error) {
- l.stateMutex.Lock()
- id := []byte(l.lw)
- if !l.locked {
- panic("attempted to check last-writer in lockfile without locking it first")
+func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
+ fType := unix.F_RDLCK
+ if lType != readLock {
+ fType = unix.F_WRLCK
}
- defer l.stateMutex.Unlock()
- _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
- if err != nil {
- return true, err
+ lk := unix.Flock_t{
+ Type: int16(fType),
+ Whence: int16(unix.SEEK_SET),
+ Start: 0,
+ Len: 0,
}
- n, err := unix.Read(int(l.fd), id)
- if err != nil {
- return true, err
+ cmd := unix.F_SETLKW
+ if nonblocking {
+ cmd = unix.F_SETLK
}
- if n != len(id) {
- return true, nil
+ for {
+ err := unix.FcntlFlock(uintptr(fd), cmd, &lk)
+ if err == nil || nonblocking {
+ return err
+ }
+ time.Sleep(10 * time.Millisecond)
}
- lw := l.lw
- l.lw = string(id)
- return l.lw != lw, nil
}
-// IsReadWriteLock indicates if the lock file is a read-write lock.
-func (l *lockfile) IsReadWrite() bool {
- return !l.ro
+func unlockAndCloseHandle(fd fileHandle) {
+ unix.Close(int(fd))
}
-// TouchedSince indicates if the lock file has been touched since the specified time
-func (l *lockfile) TouchedSince(when time.Time) bool {
- st, err := system.Fstat(int(l.fd))
- if err != nil {
- return true
- }
- mtim := st.Mtim()
- touched := time.Unix(mtim.Unix())
- return when.Before(touched)
+func closeHandle(fd fileHandle) {
+ unix.Close(int(fd))
}
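
On Unix the lock itself is a whole-file fcntl(2) record lock: F_SETLKW blocks until the lock is available, while F_SETLK (the nonblocking TryLock path) fails immediately, typically with EAGAIN, when the range is held elsewhere. A standalone, Unix-only illustration of the underlying call (not the vendored lockHandle):

    //go:build !windows

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func tryWriteLock(f *os.File) error {
        lk := unix.Flock_t{
            Type:   unix.F_WRLCK,
            Whence: int16(unix.SEEK_SET),
            // Start/Len zero: lock the whole file.
        }
        return unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk)
    }

    func main() {
        f, err := os.OpenFile("/tmp/example.lock", os.O_RDWR|os.O_CREATE, 0o644)
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := tryWriteLock(f); err != nil {
            fmt.Println("lock held elsewhere:", err) // typically EAGAIN/EACCES
            return
        }
        fmt.Println("write lock acquired")
    }
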
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
index 82bd91db9..0cc1c50cc 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
@@ -1,75 +1,109 @@
-// +build windows
+//go:build windows
package lockfile
import (
"os"
- "sync"
"time"
-)
-// createLockerForPath returns a Locker object, possibly (depending on the platform)
-// working inter-process and associated with the specified path.
-//
-// This function will be called at most once for each path value within a single process.
-//
-// If ro, the lock is a read-write lock and the returned Locker should correspond to the
-// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
-// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
-//
-// WARNING:
-// - The lock may or MAY NOT be inter-process.
-// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
-// - Even if ro, the lock MAY be exclusive.
-func createLockerForPath(path string, ro bool) (Locker, error) {
- return &lockfile{locked: false}, nil
-}
+ "golang.org/x/sys/windows"
+)
-type lockfile struct {
- mu sync.Mutex
- file string
- locked bool
-}
+const (
+ reserved = 0
+ allBytes = ^uint32(0)
+)
-func (l *lockfile) Lock() {
- l.mu.Lock()
- l.locked = true
-}
+type fileHandle windows.Handle
-func (l *lockfile) RecursiveLock() {
- // We don't support Windows but a recursive writer-lock in one process-space
- // is really a writer lock, so just panic.
- panic("not supported")
+// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
+// This is typically called before (_not after_) loading the state when initializing a consumer
+// of the data protected by the lock.
+// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
+//
+// The caller must hold the lock (for reading or writing) before this function is called.
+func (l *LockFile) GetLastWrite() (LastWrite, error) {
+ l.AssertLocked()
+ contents := make([]byte, lastWriterIDSize)
+ ol := new(windows.Overlapped)
+ var n uint32
+ err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol)
+ if err != nil && err != windows.ERROR_HANDLE_EOF {
+ return LastWrite{}, err
+ }
+ // It is important to handle the partial read case, because
+ // the initial size of the lock file is zero, which is a valid
+ // state (no writes yet)
+ contents = contents[:n]
+ return newLastWriteFromData(contents), nil
}
-func (l *lockfile) RLock() {
- l.mu.Lock()
- l.locked = true
+// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
+//
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should keep using the previously-recorded LastWrite value,
+// and possibly detecting its own modification as an external one:
+//
+// lw, err := state.lock.RecordWrite()
+// if err != nil { /* fail */ }
+// state.lastWrite = lw
+//
+// The caller must hold the lock for writing.
+func (l *LockFile) RecordWrite() (LastWrite, error) {
+ l.AssertLockedForWriting()
+ lw := newLastWrite()
+ lockContents := lw.serialize()
+ ol := new(windows.Overlapped)
+ var n uint32
+ err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol)
+ if err != nil {
+ return LastWrite{}, err
+ }
+ if int(n) != len(lockContents) {
+ return LastWrite{}, windows.ERROR_DISK_FULL
+ }
+ return lw, nil
}
-func (l *lockfile) Unlock() {
- l.locked = false
- l.mu.Unlock()
+// TouchedSince indicates if the lock file has been touched since the specified time
+func (l *LockFile) TouchedSince(when time.Time) bool {
+ stat, err := os.Stat(l.file)
+ if err != nil {
+ return true
+ }
+ return when.Before(stat.ModTime())
}
-func (l *lockfile) Locked() bool {
- return l.locked
+func openHandle(path string, mode int) (fileHandle, error) {
+ mode |= windows.O_CLOEXEC
+ fd, err := windows.Open(path, mode, windows.S_IWRITE)
+ return fileHandle(fd), err
}
-func (l *lockfile) Modified() (bool, error) {
- return false, nil
-}
-func (l *lockfile) Touch() error {
+func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
+ flags := 0
+ if lType != readLock {
+ flags = windows.LOCKFILE_EXCLUSIVE_LOCK
+ }
+ if nonblocking {
+ flags |= windows.LOCKFILE_FAIL_IMMEDIATELY
+ }
+ ol := new(windows.Overlapped)
+ if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
+ if nonblocking {
+ return err
+ }
+ panic(err)
+ }
return nil
}
-func (l *lockfile) IsReadWrite() bool {
- return false
+
+func unlockAndCloseHandle(fd fileHandle) {
+ ol := new(windows.Overlapped)
+ windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
+ closeHandle(fd)
}
-func (l *lockfile) TouchedSince(when time.Time) bool {
- stat, err := os.Stat(l.file)
- if err != nil {
- return true
- }
- return when.Before(stat.ModTime())
+func closeHandle(fd fileHandle) {
+ windows.Close(windows.Handle(fd))
}
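
The Windows counterpart locks the maximal byte range via LockFileEx, with LOCKFILE_EXCLUSIVE_LOCK for write locks and LOCKFILE_FAIL_IMMEDIATELY for the nonblocking path. An illustrative Windows-only sketch of that call (assumptions: the path and error handling are for demonstration only):

    //go:build windows

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        h, err := windows.Open(`C:\Temp\example.lock`,
            windows.O_RDWR|windows.O_CREAT|windows.O_CLOEXEC, windows.S_IWRITE)
        if err != nil {
            panic(err)
        }
        defer windows.Close(h)

        all := ^uint32(0) // lock the maximum possible byte range
        ol := new(windows.Overlapped)
        flags := uint32(windows.LOCKFILE_EXCLUSIVE_LOCK | windows.LOCKFILE_FAIL_IMMEDIATELY)
        if err := windows.LockFileEx(h, flags, 0, all, all, ol); err != nil {
            fmt.Println("lock held elsewhere:", err)
            return
        }
        defer windows.UnlockFileEx(h, 0, all, all, new(windows.Overlapped))
        fmt.Println("exclusive lock acquired")
    }
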
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index e2cf30b41..4ad634517 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -1,14 +1,16 @@
-// +build linux,cgo
+//go:build linux && cgo
package loopback
import (
"errors"
"fmt"
+ "io/fs"
"os"
"syscall"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
)
// Loopback related errors
@@ -25,7 +27,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 {
}
func getNextFreeLoopbackIndex() (int, error) {
- f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
+ f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
if err != nil {
return 0, err
}
@@ -38,99 +40,114 @@ func getNextFreeLoopbackIndex() (int, error) {
return index, err
}
-func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
+func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File, error) {
// Read information about the loopback file.
var st syscall.Stat_t
- err = syscall.Fstat(int(sparseFile.Fd()), &st)
- if err != nil {
- logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err)
+ if err := syscall.Fstat(int(sparseFile.Fd()), &st); err != nil {
+ logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
+ // upper bound to avoid infinite loop
+ remaining := 1000
+
// Start looking for a free /dev/loop
for {
+ if remaining == 0 {
+ logrus.Errorf("No free loopback devices available")
+ return nil, ErrAttachLoopbackDevice
+ }
+ remaining--
+
+ index, err := getNextFreeLoopbackIndex()
+ if err != nil {
+ logrus.Debugf("Error retrieving the next available loopback: %s", err)
+ return nil, err
+ }
+
target := fmt.Sprintf("/dev/loop%d", index)
- index++
- fi, err := os.Stat(target)
+ // OpenFile adds O_CLOEXEC
+ loopFile, err := os.OpenFile(target, os.O_RDWR, 0o644)
if err != nil {
- if os.IsNotExist(err) {
- logrus.Error("There are no more loopback devices available.")
+ // The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so
+ // just treat ENXIO as if the device does not exist.
+ if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) {
+ // Another process could have taken the loopback device in the meantime. So repeat
+ // the process with the next loopback device.
+ continue
}
+ logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
+ fi, err := loopFile.Stat()
+ if err != nil {
+ loopFile.Close()
+ logrus.Errorf("Stat loopback device: %s", err)
+ return nil, ErrAttachLoopbackDevice
+ }
if fi.Mode()&os.ModeDevice != os.ModeDevice {
+ loopFile.Close()
logrus.Errorf("Loopback device %s is not a block device.", target)
continue
}
- // OpenFile adds O_CLOEXEC
- loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
- if err != nil {
- logrus.Errorf("Error opening loopback device: %s", err)
- return nil, ErrAttachLoopbackDevice
- }
-
// Try to attach to the loop file
if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
loopFile.Close()
// If the error is EBUSY, then try the next loopback
- if err != syscall.EBUSY {
- logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
- return nil, ErrAttachLoopbackDevice
+ if err == syscall.EBUSY {
+ continue
}
- // Otherwise, we keep going with the loop
- continue
+ logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
+ return nil, ErrAttachLoopbackDevice
}
// Check if the loopback driver and underlying filesystem agree on the loopback file's
// device and inode numbers.
dev, ino, err := getLoopbackBackingFile(loopFile)
if err != nil {
- logrus.Errorf("Error getting loopback backing file: %s", err)
+ logrus.Errorf("Getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
if dev != uint64(st.Dev) || ino != st.Ino {
logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
}
-
- // In case of success, we finished. Break the loop.
- break
- }
-
- // This can't happen, but let's be sure
- if loopFile == nil {
- logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
- return nil, ErrAttachLoopbackDevice
+ return loopFile, nil
}
-
- return loopFile, nil
}
// AttachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+ return attachLoopDevice(sparseName, false)
+}
- // Try to retrieve the next available loopback device via syscall.
- // If it fails, we discard error and start looping for a
- // loopback from index 0.
- startIndex, err := getNextFreeLoopbackIndex()
- if err != nil {
- logrus.Debugf("Error retrieving the next available loopback: %s", err)
- }
+// AttachLoopDeviceRO attaches the given sparse file opened read-only to
+// the next available loopback device. It returns an opened *os.File.
+func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
+ return attachLoopDevice(sparseName, true)
+}
+
+func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
+ var sparseFile *os.File
// OpenFile adds O_CLOEXEC
- sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+ if readonly {
+ sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644)
+ } else {
+ sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644)
+ }
if err != nil {
- logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+ logrus.Errorf("Opening sparse file: %v", err)
return nil, ErrAttachLoopbackDevice
}
defer sparseFile.Close()
- loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
+ loopFile, err := openNextAvailableLoopback(sparseName, sparseFile)
if err != nil {
return nil, err
}
@@ -147,7 +164,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// If the call failed, then free the loopback device
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
- logrus.Error("Error while cleaning up the loopback device")
+ logrus.Error("While cleaning up the loopback device")
}
loopFile.Close()
return nil, ErrAttachLoopbackDevice
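
A usage sketch for the rewritten attach path (Linux with cgo, per the build tag; the backing-file path is illustrative):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/loopback"
    )

    func main() {
        // AttachLoopDeviceRO is the new read-only variant added above.
        loop, err := loopback.AttachLoopDevice("/var/tmp/backing.img")
        if err != nil {
            panic(err)
        }
        defer loop.Close()
        fmt.Println("attached at", loop.Name()) // e.g. /dev/loop0
    }
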
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
index ea6841958..cf77e5232 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -1,4 +1,4 @@
-// +build linux,cgo
+//go:build linux && cgo
package loopback
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
index a50de7f07..2a4f5cbcf 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -1,4 +1,4 @@
-// +build linux,cgo
+//go:build linux && cgo
package loopback
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index f4cf2826e..2d8821279 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -1,4 +1,4 @@
-// +build linux,cgo
+//go:build linux && cgo
package loopback
@@ -13,7 +13,7 @@ import (
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
- logrus.Errorf("Error get loopback backing file: %s", err)
+ logrus.Errorf("Get loopback backing file: %v", err)
return 0, 0, ErrGetLoopbackBackingFile
}
return loopInfo.loDevice, loopInfo.loInode, nil
@@ -22,7 +22,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
// SetCapacity reloads the size for the loopback device.
func SetCapacity(file *os.File) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
- logrus.Errorf("Error loopbackSetCapacity: %s", err)
+ logrus.Errorf("loopbackSetCapacity: %s", err)
return ErrSetCapacity
}
return nil
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
index 07a0f4847..40a229932 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
}
continue
}
- opt := strings.SplitN(option, "=", 2)
- if len(opt) != 2 || !validFlags[opt[0]] {
- return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ opt, _, ok := strings.Cut(option, "=")
+ if !ok || !validFlags[opt] {
+ return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
- if !dataCollisions[opt[0]] {
+ if !dataCollisions[opt] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
- dataCollisions[opt[0]] = true
+ dataCollisions[opt] = true
}
}
@@ -140,9 +140,9 @@ func ParseOptions(options string) (int, string) {
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := ParseOptions(options)
for _, o := range strings.Split(data, ",") {
- opt := strings.SplitN(o, "=", 2)
- if !validFlags[opt[0]] {
- return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ opt, _, _ := strings.Cut(o, "=")
+ if !validFlags[opt] {
+ return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
}
}
return flags, data, nil
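
The SplitN-to-Cut rewrites above lean on strings.Cut returning the text before and after the first separator plus a found flag; a quick stdlib illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        k, v, ok := strings.Cut("size=10m", "=")
        fmt.Println(k, v, ok) // size 10m true
        k, v, ok = strings.Cut("noexec", "=")
        fmt.Println(k, v, ok) // noexec  false
    }
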
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..3ba99cf93
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MNT_SYNCHRONOUS
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MNT_UPDATE
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MNT_NOATIME
+
+ mntDetach = unix.MNT_FORCE
+
+ NODIRATIME = 0
+ NODEV = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ BIND = 0
+ RBIND = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SLAVE = 0
+ RSLAVE = 0
+ SHARED = 0
+ RSHARED = 0
+ RELATIME = 0
+ STRICTATIME = 0
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
index 9afd26d4c..e581d64eb 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux && !freebsd
package mount
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index b31cf99d0..61d6d1c59 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -1,3 +1,5 @@
+//go:build freebsd && cgo
+
package mount
/*
@@ -28,14 +30,21 @@ func allocateIOVecs(options []string) []C.struct_iovec {
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
- xs := strings.Split(data, ",")
- for _, x := range xs {
- if x == "bind" {
- isNullFS = true
+ options := []string{"fspath", target}
+
+ if data != "" {
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ continue
+ }
+ name, val, _ := strings.Cut(x, "=")
+ options = append(options, name)
+ options = append(options, val)
}
}
- options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
@@ -48,7 +57,7 @@ func mount(device, target, mType string, flag uintptr, data string) error {
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
- return fmt.Errorf("Failed to call nmount: %s", reason)
+ return fmt.Errorf("failed to call nmount: %s", reason)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
index 9d20cfbf8..b9dc82d3f 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd
+//go:build !linux && !(freebsd && cgo)
package mount
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
index cbc0299fb..2d9e75ea1 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
@@ -1,5 +1,18 @@
package mount
-import "github.com/moby/sys/mountinfo"
+import (
+ "fmt"
+ "os"
-var PidMountInfo = mountinfo.PidMountInfo
+ "github.com/moby/sys/mountinfo"
+)
+
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return mountinfo.GetMountsFromReader(f, nil)
+}
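
A quick sketch of the reimplemented helper; mountinfo.Info exposes fields such as Mountpoint and FSType:

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/mount"
    )

    func main() {
        infos, err := mount.PidMountInfo(os.Getpid())
        if err != nil {
            panic(err)
        }
        for _, in := range infos {
            fmt.Println(in.Mountpoint, in.FSType)
        }
    }
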
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
index 1d1afeee2..331272e0c 100644
--- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
@@ -1,16 +1,28 @@
-// +build !windows
+//go:build !windows
package mount
-import "golang.org/x/sys/unix"
+import (
+ "time"
+
+ "golang.org/x/sys/unix"
+)
func unmount(target string, flags int) error {
- err := unix.Unmount(target, flags)
- if err == nil || err == unix.EINVAL {
- // Ignore "not mounted" error here. Note the same error
- // can be returned if flags are invalid, so this code
- // assumes that the flags value is always correct.
- return nil
+ var err error
+ for range 50 {
+ err = unix.Unmount(target, flags)
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ case unix.EINVAL, nil:
+ // Ignore "not mounted" error here. Note the same error
+ // can be returned if flags are invalid, so this code
+ // assumes that the flags value is always correct.
+ return nil
+ }
+ break
}
return &mountError{
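
Toolchain note: `for range 50` is the Go 1.22+ range-over-int form; on earlier compilers the same retry loop would be written with a counter:

    // Pre-Go-1.22 equivalent of the retry loop above (body elided).
    for i := 0; i < 50; i++ {
        // attempt the unmount; sleep 50ms and continue on EBUSY
    }
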
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
index eebc4ab84..3c942bfb2 100644
--- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package mount
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
deleted file mode 100644
index 7738fc741..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// +build !windows
-
-// Package kernel provides helper function to get, parse and compare kernel
-// versions for different platforms.
-package kernel
-
-import (
- "errors"
- "fmt"
-)
-
-// VersionInfo holds information about the kernel.
-type VersionInfo struct {
- Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
- Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
- Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
- Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
-}
-
-func (k *VersionInfo) String() string {
- return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
-}
-
-// CompareKernelVersion compares two kernel.VersionInfo structs.
-// Returns -1 if a < b, 0 if a == b, 1 it a > b
-func CompareKernelVersion(a, b VersionInfo) int {
- if a.Kernel < b.Kernel {
- return -1
- } else if a.Kernel > b.Kernel {
- return 1
- }
-
- if a.Major < b.Major {
- return -1
- } else if a.Major > b.Major {
- return 1
- }
-
- if a.Minor < b.Minor {
- return -1
- } else if a.Minor > b.Minor {
- return 1
- }
-
- return 0
-}
-
-// ParseRelease parses a string and creates a VersionInfo based on it.
-func ParseRelease(release string) (*VersionInfo, error) {
- var (
- kernel, major, minor, parsed int
- flavor, partial string
- )
-
- // Ignore error from Sscanf to allow an empty flavor. Instead, just
- // make sure we got all the version numbers.
- parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
- if parsed < 2 {
- return nil, errors.New("Can't parse kernel version " + release)
- }
-
- // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
- parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
- if parsed < 1 {
- flavor = partial
- }
-
- return &VersionInfo{
- Kernel: kernel,
- Major: major,
- Minor: minor,
- Flavor: flavor,
- }, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
deleted file mode 100644
index 71f205b28..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build darwin
-
-// Package kernel provides helper function to get, parse and compare kernel
-// versions for different platforms.
-package kernel
-
-import (
- "fmt"
- "os/exec"
- "strings"
-
- "github.com/mattn/go-shellwords"
-)
-
-// GetKernelVersion gets the current kernel version.
-func GetKernelVersion() (*VersionInfo, error) {
- release, err := getRelease()
- if err != nil {
- return nil, err
- }
-
- return ParseRelease(release)
-}
-
-// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
-func getRelease() (string, error) {
- cmd := exec.Command("system_profiler", "SPSoftwareDataType")
- osName, err := cmd.Output()
- if err != nil {
- return "", err
- }
-
- var release string
- data := strings.Split(string(osName), "\n")
- for _, line := range data {
- if strings.Contains(line, "Kernel Version") {
- // It has the format like ' Kernel Version: Darwin 14.5.0'
- content := strings.SplitN(line, ":", 2)
- if len(content) != 2 {
- return "", fmt.Errorf("Kernel Version is invalid")
- }
-
- prettyNames, err := shellwords.Parse(content[1])
- if err != nil {
- return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
- }
-
- if len(prettyNames) != 2 {
- return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
- }
- release = prettyNames[1]
- }
- }
-
- return release, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
deleted file mode 100644
index 76e1e499f..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build linux freebsd solaris openbsd
-
-// Package kernel provides helper function to get, parse and compare kernel
-// versions for different platforms.
-package kernel
-
-import (
- "bytes"
-
- "github.com/sirupsen/logrus"
-)
-
-// GetKernelVersion gets the current kernel version.
-func GetKernelVersion() (*VersionInfo, error) {
- uts, err := uname()
- if err != nil {
- return nil, err
- }
-
- release := make([]byte, len(uts.Release))
-
- i := 0
- for _, c := range uts.Release {
- release[i] = byte(c)
- i++
- }
-
- // Remove the \x00 from the release for Atoi to parse correctly
- release = release[:bytes.IndexByte(release, 0)]
-
- return ParseRelease(string(release))
-}
-
-// CheckKernelVersion checks if current kernel is newer than (or equal to)
-// the given version.
-func CheckKernelVersion(k, major, minor int) bool {
- if v, err := GetKernelVersion(); err != nil {
- logrus.Warnf("error getting kernel version: %s", err)
- } else {
- if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
deleted file mode 100644
index 3d3829236..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// +build windows
-
-package kernel
-
-import (
- "fmt"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-// VersionInfo holds information about the kernel.
-type VersionInfo struct {
- kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6)
- major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1)
- minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601)
- build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592)
-}
-
-func (k *VersionInfo) String() string {
- return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
-}
-
-// GetKernelVersion gets the current kernel version.
-func GetKernelVersion() (*VersionInfo, error) {
-
- var (
- h windows.Handle
- dwVersion uint32
- err error
- )
-
- KVI := &VersionInfo{"Unknown", 0, 0, 0}
-
- if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
- windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
- 0,
- windows.KEY_READ,
- &h); err != nil {
- return KVI, err
- }
- defer windows.RegCloseKey(h)
-
- var buf [1 << 10]uint16
- var typ uint32
- n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
-
- if err = windows.RegQueryValueEx(h,
- windows.StringToUTF16Ptr("BuildLabEx"),
- nil,
- &typ,
- (*byte)(unsafe.Pointer(&buf[0])),
- &n); err != nil {
- return KVI, err
- }
-
- KVI.kvi = windows.UTF16ToString(buf[:])
-
- // Important - docker.exe MUST be manifested for this API to return
- // the correct information.
- if dwVersion, err = windows.GetVersion(); err != nil {
- return KVI, err
- }
-
- KVI.major = int(dwVersion & 0xFF)
- KVI.minor = int((dwVersion & 0xFF00) >> 8)
- KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
-
- return KVI, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
deleted file mode 100644
index e913fad00..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package kernel
-
-import "golang.org/x/sys/unix"
-
-// Utsname represents the system name structure.
-// It is passthrough for unix.Utsname in order to make it portable with
-// other platforms where it is not available.
-type Utsname unix.Utsname
-
-func uname() (*unix.Utsname, error) {
- uts := &unix.Utsname{}
-
- if err := unix.Uname(uts); err != nil {
- return nil, err
- }
- return uts, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
deleted file mode 100644
index 49370bd3d..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package kernel
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func uname() (*unix.Utsname, error) {
- uts := &unix.Utsname{}
-
- if err := unix.Uname(uts); err != nil {
- return nil, err
- }
- return uts, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
deleted file mode 100644
index 1da3f239f..000000000
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !linux,!solaris
-
-package kernel
-
-import (
- "errors"
-)
-
-// Utsname represents the system name structure.
-// It is defined here to make it portable as it is available on linux but not
-// on windows.
-type Utsname struct {
- Release [65]byte
-}
-
-func uname() (*Utsname, error) {
- return nil, errors.New("Kernel version detection is available only on linux")
-}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
index acc897168..7b20b0628 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -11,11 +11,11 @@ import (
// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
func ParseKeyValueOpt(opt string) (string, string, error) {
- parts := strings.SplitN(opt, "=", 2)
- if len(parts) != 2 {
- return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ k, v, ok := strings.Cut(opt, "=")
+ if !ok {
+ return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
}
- return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+ return strings.TrimSpace(k), strings.TrimSpace(v), nil
}
// ParseUintList parses and validates the specified string as the value
@@ -24,13 +24,14 @@ func ParseKeyValueOpt(opt string) (string, string, error) {
// input string. It returns a `map[int]bool` with available elements from `val`
// set to `true`.
// Supported formats:
-// 7
-// 1-6
-// 0,3-4,7,8-10
-// 0-0,0,1-7
-// 03,1-3 <- this is gonna get parsed as [1,2,3]
-// 3,2,1
-// 0-2,3,1
+//
+// 7
+// 1-6
+// 0,3-4,7,8-10
+// 0-0,0,1-7
+// 03,1-3 <- this is gonna get parsed as [1,2,3]
+// 3,2,1
+// 0-2,3,1
func ParseUintList(val string) (map[int]bool, error) {
if val == "" {
return map[int]bool{}, nil
@@ -41,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) {
errInvalidFormat := fmt.Errorf("invalid format: %s", val)
for _, r := range split {
- if !strings.Contains(r, "-") {
+ minS, maxS, ok := strings.Cut(r, "-")
+ if !ok {
v, err := strconv.Atoi(r)
if err != nil {
return nil, errInvalidFormat
}
availableInts[v] = true
} else {
- split := strings.SplitN(r, "-", 2)
- min, err := strconv.Atoi(split[0])
+ min, err := strconv.Atoi(minS)
if err != nil {
return nil, errInvalidFormat
}
- max, err := strconv.Atoi(split[1])
+ max, err := strconv.Atoi(maxS)
if err != nil {
return nil, errInvalidFormat
}
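
The Cut-based rewrite keeps the behavior documented in the comment above, for instance:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/parsers"
    )

    func main() {
        m, _ := parsers.ParseUintList("0,3-4")
        fmt.Println(m) // map[0:true 3:true 4:true]

        m, _ = parsers.ParseUintList("03,1-3") // Atoi("03") == 3
        fmt.Println(m)                         // map[1:true 2:true 3:true]
    }
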
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
new file mode 100644
index 000000000..171cd81e7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
@@ -0,0 +1,37 @@
+//go:build freebsd
+
+package reexec
+
+import (
+ "context"
+ "os"
+ "os/exec"
+
+ "golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Uses sysctl.
+func Self() string {
+ path, err := unix.SysctlArgs("kern.proc.pathname", -1)
+ if err == nil {
+ return path
+ }
+ return os.Args[0]
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 372bee732..025aef60a 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -1,4 +1,4 @@
-// +build linux
+//go:build linux
package reexec
@@ -17,6 +17,7 @@ func Self() string {
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd {
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index 1ecaa906f..eefddea41 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -1,4 +1,4 @@
-// +build freebsd solaris darwin
+//go:build solaris || darwin
package reexec
@@ -17,6 +17,7 @@ func Self() string {
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -24,6 +25,7 @@ func Command(args ...string) *exec.Cmd {
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
index 9d9374268..a78b548a5 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris,!darwin
+//go:build !linux && !windows && !freebsd && !solaris && !darwin
package reexec
@@ -9,10 +9,12 @@ import (
// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
return nil
}
// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
index 673ab476a..ba2f0f847 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package reexec
@@ -17,6 +17,7 @@ func Self() string {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
index c56671d91..0c032e6c4 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
@@ -7,7 +7,10 @@ import (
"path/filepath"
)
-var registeredInitializers = make(map[string]func())
+var (
+ registeredInitializers = make(map[string]func())
+ initWasCalled = false
+)
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
@@ -22,6 +25,7 @@ func Register(name string, initializer func()) {
// initialization function was called.
func Init() bool {
initializer, exists := registeredInitializers[os.Args[0]]
+ initWasCalled = true
if exists {
initializer()
@@ -30,7 +34,22 @@ func Init() bool {
return false
}
-func naiveSelf() string {
+func panicIfNotInitialized() {
+ if !initWasCalled {
+ // The reexec package is used to run subroutines in
+ // subprocesses which would otherwise have unacceptable side
+ // effects on the main thread. If you found this error, then
+ // your program uses a package which needs to do this. In
+ // order for that to work, main() should start with this
+ // boilerplate, or an equivalent:
+ // if reexec.Init() {
+ // return
+ // }
+ panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()")
+ }
+}
+
+func naiveSelf() string { //nolint: unused
name := os.Args[0]
if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil {
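
The panic message above spells out the required boilerplate; a minimal runnable sketch of the pattern (the "storage-helper" name is made up for illustration):

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/reexec"
    )

    func init() {
        // Runs in the child process when re-executed under this name.
        reexec.Register("storage-helper", func() {
            fmt.Println("hello from the re-executed child")
            os.Exit(0)
        })
    }

    func main() {
        // Must run first: dispatches registered initializers and marks
        // initWasCalled so reexec.Command does not panic.
        if reexec.Init() {
            return
        }
        cmd := reexec.Command("storage-helper")
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            panic(err)
        }
    }
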
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
new file mode 100644
index 000000000..1a3333dba
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
@@ -0,0 +1,234 @@
+package regexp
+
+import (
+ "io"
+ "regexp"
+ "sync"
+)
+
+// Regexp is a wrapper around regexp.Regexp for expressions that would
+// otherwise be MustCompile'd into global variables. Using this structure
+// helps speed up the startup time of apps that want global regex variables:
+// this library initializes them on first use rather than at executable start.
+type Regexp struct {
+ *regexpStruct
+}
+
+type regexpStruct struct {
+ _ noCopy
+ once sync.Once
+ regexp *regexp.Regexp
+ val string
+}
+
+func Delayed(val string) Regexp {
+ re := &regexpStruct{
+ val: val,
+ }
+ if precompile {
+ re.regexp = regexp.MustCompile(re.val)
+ }
+ return Regexp{re}
+}
+
+func (re *regexpStruct) compile() {
+ if precompile {
+ return
+ }
+ re.once.Do(func() {
+ re.regexp = regexp.MustCompile(re.val)
+ })
+}
+
+func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+ re.compile()
+ return re.regexp.Expand(dst, template, src, match)
+}
+
+func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte {
+ re.compile()
+ return re.regexp.ExpandString(dst, template, src, match)
+}
+
+func (re *regexpStruct) Find(b []byte) []byte {
+ re.compile()
+ return re.regexp.Find(b)
+}
+
+func (re *regexpStruct) FindAll(b []byte, n int) [][]byte {
+ re.compile()
+ return re.regexp.FindAll(b, n)
+}
+
+func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int {
+ re.compile()
+ return re.regexp.FindAllIndex(b, n)
+}
+
+func (re *regexpStruct) FindAllString(s string, n int) []string {
+ re.compile()
+ return re.regexp.FindAllString(s, n)
+}
+
+func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int {
+ re.compile()
+ return re.regexp.FindAllStringIndex(s, n)
+}
+
+func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string {
+ re.compile()
+ return re.regexp.FindAllStringSubmatch(s, n)
+}
+
+func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int {
+ re.compile()
+ return re.regexp.FindAllStringSubmatchIndex(s, n)
+}
+
+func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte {
+ re.compile()
+ return re.regexp.FindAllSubmatch(b, n)
+}
+
+func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int {
+ re.compile()
+ return re.regexp.FindAllSubmatchIndex(b, n)
+}
+
+func (re *regexpStruct) FindIndex(b []byte) (loc []int) {
+ re.compile()
+ return re.regexp.FindIndex(b)
+}
+
+func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) {
+ re.compile()
+ return re.regexp.FindReaderIndex(r)
+}
+
+func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int {
+ re.compile()
+ return re.regexp.FindReaderSubmatchIndex(r)
+}
+
+func (re *regexpStruct) FindString(s string) string {
+ re.compile()
+ return re.regexp.FindString(s)
+}
+
+func (re *regexpStruct) FindStringIndex(s string) (loc []int) {
+ re.compile()
+ return re.regexp.FindStringIndex(s)
+}
+
+func (re *regexpStruct) FindStringSubmatch(s string) []string {
+ re.compile()
+ return re.regexp.FindStringSubmatch(s)
+}
+
+func (re *regexpStruct) FindStringSubmatchIndex(s string) []int {
+ re.compile()
+ return re.regexp.FindStringSubmatchIndex(s)
+}
+
+func (re *regexpStruct) FindSubmatch(b []byte) [][]byte {
+ re.compile()
+ return re.regexp.FindSubmatch(b)
+}
+
+func (re *regexpStruct) FindSubmatchIndex(b []byte) []int {
+ re.compile()
+ return re.regexp.FindSubmatchIndex(b)
+}
+
+func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) {
+ re.compile()
+ return re.regexp.LiteralPrefix()
+}
+
+func (re *regexpStruct) Longest() {
+ re.compile()
+ re.regexp.Longest()
+}
+
+func (re *regexpStruct) Match(b []byte) bool {
+ re.compile()
+ return re.regexp.Match(b)
+}
+
+func (re *regexpStruct) MatchReader(r io.RuneReader) bool {
+ re.compile()
+ return re.regexp.MatchReader(r)
+}
+
+func (re *regexpStruct) MatchString(s string) bool {
+ re.compile()
+ return re.regexp.MatchString(s)
+}
+
+func (re *regexpStruct) NumSubexp() int {
+ re.compile()
+ return re.regexp.NumSubexp()
+}
+
+func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte {
+ re.compile()
+ return re.regexp.ReplaceAll(src, repl)
+}
+
+func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
+ re.compile()
+ return re.regexp.ReplaceAllFunc(src, repl)
+}
+
+func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte {
+ re.compile()
+ return re.regexp.ReplaceAllLiteral(src, repl)
+}
+
+func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string {
+ re.compile()
+ return re.regexp.ReplaceAllLiteralString(src, repl)
+}
+
+func (re *regexpStruct) ReplaceAllString(src, repl string) string {
+ re.compile()
+ return re.regexp.ReplaceAllString(src, repl)
+}
+
+func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string {
+ re.compile()
+ return re.regexp.ReplaceAllStringFunc(src, repl)
+}
+
+func (re *regexpStruct) Split(s string, n int) []string {
+ re.compile()
+ return re.regexp.Split(s, n)
+}
+
+func (re *regexpStruct) String() string {
+ re.compile()
+ return re.regexp.String()
+}
+
+func (re *regexpStruct) SubexpIndex(name string) int {
+ re.compile()
+ return re.regexp.SubexpIndex(name)
+}
+
+func (re *regexpStruct) SubexpNames() []string {
+ re.compile()
+ return re.regexp.SubexpNames()
+}
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
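
Intended use is as a drop-in for package-level regexp.MustCompile variables: the compile cost moves to first use, unless the regexp_precompile build tag (below) restores eager compilation. A sketch:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/regexp"
    )

    // Not compiled yet; the first method call compiles it exactly once.
    var validHex = regexp.Delayed(`^[a-f0-9]{64}$`)

    func main() {
        fmt.Println(validHex.MatchString("deadbeef")) // false: too short
    }
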
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
new file mode 100644
index 000000000..ccd9d0fb1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
@@ -0,0 +1,5 @@
+//go:build !regexp_precompile
+
+package regexp
+
+const precompile = false
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
new file mode 100644
index 000000000..fe4421b01
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
@@ -0,0 +1,5 @@
+//go:build regexp_precompile
+
+package regexp
+
+const precompile = true
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
index a0c7c42a0..20abc7407 100644
--- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -9,17 +9,22 @@ import (
"math"
"math/big"
"math/rand"
- "regexp"
"strconv"
"strings"
+ "sync"
"time"
+
+ "github.com/containers/storage/pkg/regexp"
)
const shortLen = 12
var (
- validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
- validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+ validShortID = regexp.Delayed("^[a-f0-9]{12}$")
+ validHex = regexp.Delayed(`^[a-f0-9]{64}$`)
+
+ rngLock sync.Mutex
+ rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with rngLock held.
)
// IsShortID determines if an arbitrary string *looks like* a short ID.
@@ -58,7 +63,7 @@ func generateID(r io.Reader) string {
}
}
-// GenerateRandomID returns a unique id.
+// GenerateRandomID returns a pseudorandom 64-character hex string.
func GenerateRandomID() string {
return generateID(cryptorand.Reader)
}
@@ -67,7 +72,9 @@ func GenerateRandomID() string {
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
- return generateID(readerFunc(rand.Read))
+ rngLock.Lock()
+ defer rngLock.Unlock()
+ return generateID(readerFunc(rng.Read))
}
// ValidateID checks whether an ID string is a valid image ID.
@@ -79,7 +86,7 @@ func ValidateID(id string) error {
}
func init() {
- // safely set the seed globally so we generate random ids. Tries to use a
+ // Initialize a private RNG so we generate random ids. Tries to use a
// crypto seed before falling back to time.
var seed int64
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
@@ -89,7 +96,7 @@ func init() {
seed = cryptoseed.Int64()
}
- rand.Seed(seed)
+ rng = rand.New(rand.NewSource(seed))
}
type readerFunc func(p []byte) (int, error)
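
The stringid change replaces the deprecated global rand.Seed with a private, mutex-guarded rand.Rand; the pattern in isolation (seed source simplified here):

    package main

    import (
        "fmt"
        "math/rand"
        "sync"
        "time"
    )

    var (
        rngLock sync.Mutex
        // *rand.Rand is not safe for concurrent use, so all access
        // happens with rngLock held.
        rng = rand.New(rand.NewSource(time.Now().UnixNano()))
    )

    func randomByte() byte {
        rngLock.Lock()
        defer rngLock.Unlock()
        return byte(rng.Intn(256))
    }

    func main() {
        fmt.Println(randomByte())
    }
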
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
index 66a59c85d..f63c3e444 100644
--- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
+++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
@@ -3,7 +3,7 @@ package stringutils
import (
"bytes"
- "math/rand"
+ "math/rand/v2"
"strings"
)
@@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string {
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
for i := range b {
- b[i] = letters[rand.Intn(len(letters))]
+ b[i] = letters[rand.IntN(len(letters))]
}
return string(b)
}
@@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string {
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
- res[i] = chars[rand.Intn(len(chars))]
+ res[i] = chars[rand.IntN(len(chars))]
}
return string(res)
}
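
math/rand/v2 renames Intn to IntN, and its package-level functions are seeded automatically and safe for concurrent use, so no Seed bookkeeping is needed:

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    func main() {
        letters := []byte("abcdefghijklmnopqrstuvwxyz")
        fmt.Printf("%c\n", letters[rand.IntN(len(letters))])
    }
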
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
index 09d58bcbf..892d56138 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
@@ -6,9 +6,9 @@ import (
"time"
)
-//setCTime will set the create time on a file. On Unix, the create
-//time is updated as a side effect of setting the modified time, so
-//no action is required.
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
index 45428c141..f0d744eb8 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package system
@@ -8,8 +8,8 @@ import (
"golang.org/x/sys/windows"
)
-//setCTime will set the create time on a file. On Windows, this requires
-//calling SetFileTime and explicitly including the create time.
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := windows.NsecToTimespec(ctime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path)
diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go
index 288318985..b87d419b5 100644
--- a/vendor/github.com/containers/storage/pkg/system/errors.go
+++ b/vendor/github.com/containers/storage/pkg/system/errors.go
@@ -4,7 +4,5 @@ import (
"errors"
)
-var (
- // ErrNotSupportedPlatform means the platform is not supported.
- ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
-)
+// ErrNotSupportedPlatform means the platform is not supported.
+var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go
index 17935088d..05642f603 100644
--- a/vendor/github.com/containers/storage/pkg/system/init.go
+++ b/vendor/github.com/containers/storage/pkg/system/init.go
@@ -6,7 +6,7 @@ import (
"unsafe"
)
-// Used by chtimes
+// maxTime is used by chtimes.
var maxTime time.Time
func init() {
diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go
index 019c66441..5f6fea1d3 100644
--- a/vendor/github.com/containers/storage/pkg/system/init_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go
@@ -13,5 +13,4 @@ func init() {
if os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true
}
-
}
diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
new file mode 100644
index 000000000..f9de938dd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
@@ -0,0 +1,55 @@
+//go:build freebsd
+
+package system
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Flag values from <sys/stat.h>
+const (
+ /*
+ * Definitions of flags stored in file flags word.
+ *
+ * Super-user and owner changeable flags.
+ */
+ UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */
+ UF_NODUMP uint32 = 0x00000001 /* do not dump file */
+ UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */
+ UF_APPEND uint32 = 0x00000004 /* writes to file may only append */
+ UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt. union */
+ UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */
+
+ UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */
+ UF_SPARSE uint32 = 0x00000100 /* sparse file */
+ UF_OFFLINE uint32 = 0x00000200 /* file is offline */
+ UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */
+ UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */
+ UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */
+ /* This is the same as the MacOS X definition of UF_HIDDEN. */
+ UF_HIDDEN uint32 = 0x00008000 /* file is hidden */
+
+ /*
+ * Super-user changeable flags.
+ */
+ SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */
+ SF_ARCHIVED uint32 = 0x00010000 /* file is archived */
+ SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */
+ SF_APPEND uint32 = 0x00040000 /* writes to file may only append */
+ SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */
+ SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */
+)
+
+func Lchflags(path string, flags uint32) error {
+ p, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
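
A usage sketch for the new FreeBSD-only helper (this is what resetFileFlags, later in this diff, applies per file): clearing the flags word removes e.g. UF_IMMUTABLE so a file can be changed or unlinked again. The path is illustrative, and this compiles only on FreeBSD:

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        if err := system.Lchflags("/tmp/some-file", 0); err != nil {
            log.Fatal(err)
        }
    }
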
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
index cff33bb40..037ccf59d 100644
--- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
index e9d301f09..826c1f9c3 100644
--- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
@@ -14,7 +14,7 @@ import (
func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
- return nil, &os.PathError{"Lstat", path, err}
+ return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
}
return fromStatT(s)
}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
new file mode 100644
index 000000000..589cbeba7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -0,0 +1,85 @@
+//go:build freebsd && cgo
+
+package system
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <unistd.h>
+// #include <sys/types.h>
+// #include <sys/vmmeter.h>
+// #include <vm/swap_pager.h>
+import "C"
+
+func getMemInfo() (int64, int64, error) {
+ data, err := unix.SysctlRaw("vm.vmtotal")
+ if err != nil {
+ return -1, -1, fmt.Errorf("can't get kernel info: %w", err)
+ }
+ if len(data) != C.sizeof_struct_vmtotal {
+ return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
+ }
+
+ total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0]))
+
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ npages := int64(C.sysconf(C._SC_PHYS_PAGES))
+ return pagesize * npages, pagesize * int64(total.t_free), nil
+}
+
+func getSwapInfo() (int64, int64, error) {
+ var (
+ total int64 = 0
+ used int64 = 0
+ )
+ swapCount, err := unix.SysctlUint32("vm.nswapdev")
+ if err != nil {
+ return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err)
+ }
+ for i := 0; i < int(swapCount); i++ {
+ data, err := unix.SysctlRaw("vm.swap_info", i)
+ if err != nil {
+ return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err)
+ }
+ if len(data) != C.sizeof_struct_xswdev {
+ return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
+ }
+ xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0]))
+ total += int64(xsw.xsw_nblks)
+ used += int64(xsw.xsw_used)
+ }
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ return pagesize * total, pagesize * (total - used), nil
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+//
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ MemTotal, MemFree, err := getMemInfo()
+ if err != nil {
+ return nil, fmt.Errorf("getting memory totals %w", err)
+ }
+ SwapTotal, SwapFree, err := getSwapInfo()
+ if err != nil {
+ return nil, fmt.Errorf("getting swap totals %w", err)
+ }
+
+ if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
+ return nil, errors.New("getting system memory info")
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less memory locked by the kernel
+ meminfo.MemTotal = MemTotal
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index 925776e78..17474e114 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -1,4 +1,4 @@
-// +build solaris,cgo
+//go:build solaris && cgo
package system
@@ -80,9 +80,9 @@ func getFreeMem() int64 {
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
-// MemInfo type.
+//
+// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
-
ppKernel := C.getPpKernel()
MemTotal := getTotalMem()
MemFree := getFreeMem()
@@ -90,7 +90,7 @@ func ReadMemInfo() (*MemInfo, error) {
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
SwapFree < 0 {
- return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ return nil, fmt.Errorf("getting system memory info %w", err)
}
meminfo := &MemInfo{}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
index 3ce019dff..db0864275 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!solaris
+//go:build !linux && !windows && !solaris && !(freebsd && cgo)
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
index 883944a4c..c833f30f7 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
@@ -27,7 +27,8 @@ type memorystatusex struct {
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
-// MemInfo type.
+//
+// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
msi := &memorystatusex{
dwLength: 64,
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go
index c276ce8e8..ff679c5b1 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod.go
@@ -1,4 +1,4 @@
-// +build !windows,!freebsd
+//go:build !windows && !freebsd
package system
@@ -8,8 +8,8 @@ import (
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
-func Mknod(path string, mode uint32, dev int) error {
- return unix.Mknod(path, mode, dev)
+func Mknod(path string, mode uint32, dev uint32) error {
+ return unix.Mknod(path, mode, int(dev))
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
index d09005589..d94353600 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
@@ -1,4 +1,4 @@
-// +build freebsd
+//go:build freebsd
package system
@@ -17,6 +17,6 @@ func Mknod(path string, mode uint32, dev uint64) error {
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
-func Mkdev(major int64, minor int64) uint32 {
- return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+func Mkdev(major int64, minor int64) uint64 {
+ return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
index 2e863c021..752f90b14 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go
index f634a6be6..ca076f2bc 100644
--- a/vendor/github.com/containers/storage/pkg/system/path.go
+++ b/vendor/github.com/containers/storage/pkg/system/path.go
@@ -17,5 +17,4 @@ func DefaultPathEnv(platform string) string {
return ""
}
return defaultUnixPathEnv
-
}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go
index f3762e69d..fc8de3e4d 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
index aab891522..8838d9fd2 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package system
@@ -21,13 +21,13 @@ import (
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("No relative path specified in %q", path)
+ return "", fmt.Errorf("relative path not specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ return "", fmt.Errorf("specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go
index a9a0dd751..5090f3042 100644
--- a/vendor/github.com/containers/storage/pkg/system/process_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris darwin
+//go:build linux || freebsd || solaris || darwin
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 510e71428..12243707a 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -1,12 +1,13 @@
package system
import (
+ "errors"
+ "fmt"
"os"
"syscall"
"time"
"github.com/containers/storage/pkg/mount"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -27,11 +28,17 @@ func EnsureRemoveAll(dir string) error {
// track retries
exitOnErr := make(map[string]int)
- maxRetry := 100
+ maxRetry := 1000
+
+ // Attempt a simple remove all first, this avoids the more expensive
+ // RecursiveUnmount call if not needed.
+ if err := os.RemoveAll(dir); err == nil {
+ return nil
+ }
// Attempt to unmount anything beneath this dir first
if err := mount.RecursiveUnmount(dir); err != nil {
- logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err)
+ logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
}
for {
@@ -40,6 +47,19 @@ func EnsureRemoveAll(dir string) error {
return nil
}
+ // If the RemoveAll fails with a permission error, we
+ // may have immutable files so try to remove the
+ // immutable flag and redo the RemoveAll.
+ if errors.Is(err, syscall.EPERM) {
+ if err = resetFileFlags(dir); err != nil {
+ return fmt.Errorf("resetting file flags: %w", err)
+ }
+ err = os.RemoveAll(dir)
+ if err == nil {
+ return nil
+ }
+ }
+
pe, ok := err.(*os.PathError)
if !ok {
return err
@@ -62,18 +82,18 @@ func EnsureRemoveAll(dir string) error {
continue
}
- if pe.Err != syscall.EBUSY {
+ if !IsEBUSY(pe.Err) {
return err
}
if e := mount.Unmount(pe.Path); e != nil {
- return errors.Wrapf(e, "error while removing %s", dir)
+ return fmt.Errorf("while removing %s: %w", dir, e)
}
if exitOnErr[pe.Path] == maxRetry {
return err
}
exitOnErr[pe.Path]++
- time.Sleep(100 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
}
}
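For callers, the observable contract of the reworked EnsureRemoveAll is unchanged: hand it a directory that may still contain mounts, immutable files (FreeBSD), or transiently busy paths, and it either removes the tree or returns the final error. A minimal usage sketch, assuming the vendored module is importable:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// EnsureRemoveAll first tries a plain os.RemoveAll, then recursively
	// unmounts anything below the target, clears immutable file flags on
	// EPERM (FreeBSD), and retries EBUSY paths with a short sleep.
	dir := filepath.Join(os.TempDir(), "scratch-layer") // example path
	if err := os.MkdirAll(filepath.Join(dir, "a/b"), 0o755); err != nil {
		panic(err)
	}
	if err := system.EnsureRemoveAll(dir); err != nil {
		fmt.Fprintf(os.Stderr, "cleanup failed: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("removed", dir)
}
```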
diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go
new file mode 100644
index 000000000..db214c4cd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go
@@ -0,0 +1,9 @@
+//go:build !freebsd
+
+package system
+
+// Reset file flags in a directory tree. This allows EnsureRemoveAll
+// to delete trees which have the immutable flag set.
+func resetFileFlags(dir string) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
new file mode 100644
index 000000000..39a5de7aa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
@@ -0,0 +1,17 @@
+package system
+
+import (
+ "io/fs"
+ "path/filepath"
+)
+
+// Reset file flags in a directory tree. This allows EnsureRemoveAll
+// to delete trees which have the immutable flag set.
+func resetFileFlags(dir string) error {
+ return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err := Lchflags(path, 0); err != nil {
+ return err
+ }
+ return nil
+ })
+}
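Lchflags in the new file above is pkg/system's wrapper around FreeBSD's chflags(2) family. A standalone sketch of the same walk using golang.org/x/sys/unix directly (FreeBSD-only; the target path is an example):

```go
//go:build freebsd

package main

import (
	"io/fs"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// clearFlags clears all file flags (e.g. the user-immutable flag
// UF_IMMUTABLE) below dir so a following os.RemoveAll can succeed, mirroring
// what resetFileFlags does via the pkg/system wrapper.
func clearFlags(dir string) error {
	return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Lchflags operates on the link itself, so symlinks are handled safely.
		return unix.Lchflags(path, 0)
	})
}

func main() {
	if err := clearFlags("/tmp/scratch"); err != nil { // example path
		panic(err)
	}
}
```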
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go
new file mode 100644
index 000000000..1d57b7f40
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go
@@ -0,0 +1,11 @@
+//go:build !freebsd
+
+package system
+
+type platformStatT struct{}
+
+// Flags return file flags if supported or zero otherwise
+func (s StatT) Flags() uint32 {
+ _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms)
+ return 0
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
index 715f05b93..57850a883 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtimespec}, nil
+ mtim: s.Mtimespec,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
index 715f05b93..4b95073a3 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
@@ -2,12 +2,27 @@ package system
import "syscall"
+type platformStatT struct {
+ flags uint32
+}
+
+// Flags return file flags if supported or zero otherwise
+func (s StatT) Flags() uint32 {
+ return s.flags
+}
+
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ st := &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtimespec}, nil
+ mtim: s.Mtimespec,
+ dev: s.Dev,
+ }
+ st.flags = s.Flags
+ st.dev = s.Dev
+ return st, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
index af7af20fa..e3d13463f 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -4,12 +4,15 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
+ mtim: s.Mtim,
+ dev: uint64(s.Dev),
+ }, nil
}
// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
index b607dea94..a413e1714 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
+ mtim: s.Mtim,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
index b607dea94..a413e1714 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
+ mtim: s.Mtim,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
index 2fac918bf..ffe45f32d 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
@@ -6,6 +6,8 @@ import (
"os"
"strconv"
"syscall"
+
+ "golang.org/x/sys/unix"
)
// StatT type contains status of a file. It contains metadata
@@ -17,6 +19,8 @@ type StatT struct {
rdev uint64
size int64
mtim syscall.Timespec
+ dev uint64
+ platformStatT
}
// Mode returns file's permission mode.
@@ -49,6 +53,15 @@ func (s StatT) Mtim() syscall.Timespec {
return s.mtim
}
+// Dev returns a unique identifier for owning filesystem
+func (s StatT) Dev() uint64 {
+ return s.dev
+}
+
+func (s StatT) IsDir() bool {
+ return (s.mode & unix.S_IFDIR) != 0
+}
+
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
index d30636052..828be2088 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -11,6 +11,7 @@ type StatT struct {
mode os.FileMode
size int64
mtim time.Time
+ platformStatT
}
// Size returns file's size.
@@ -42,6 +43,15 @@ func (s StatT) GID() uint32 {
return 0
}
+// Dev returns a unique identifier for owning filesystem
+func (s StatT) Dev() uint64 {
+ return 0
+}
+
+func (s StatT) IsDir() bool {
+ return s.Mode().IsDir()
+}
+
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
@@ -59,5 +69,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) {
return &StatT{
size: (*fi).Size(),
mode: (*fi).Mode(),
- mtim: (*fi).ModTime()}, nil
+ mtim: (*fi).ModTime(),
+ }, nil
}
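The new Dev() accessor makes it possible to tell whether two paths live on the same filesystem, which is useful when deciding whether a rename can work or a copy is needed. A usage sketch (on Windows Dev() is always 0, so the comparison degenerates to true):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

// sameFilesystem compares the device IDs exposed through StatT.Dev().
func sameFilesystem(a, b string) (bool, error) {
	sa, err := system.Stat(a)
	if err != nil {
		return false, err
	}
	sb, err := system.Stat(b)
	if err != nil {
		return false, err
	}
	return sa.Dev() == sb.Dev(), nil
}

func main() {
	same, err := sameFilesystem("/", "/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Println("same filesystem:", same)
}
```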
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
index 49dbdd378..d1b41f34d 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -1,8 +1,12 @@
-// +build linux freebsd
+//go:build !windows
package system
-import "golang.org/x/sys/unix"
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
// Unmount is a platform-specific helper function to call
// the unmount syscall.
@@ -15,3 +19,8 @@ func Unmount(dest string) error {
func CommandLineToArgv(commandLine string) ([]string, error) {
return []string{commandLine}, nil
}
+
+// IsEBUSY checks if the specified error is EBUSY.
+func IsEBUSY(err error) bool {
+ return errors.Is(err, unix.EBUSY)
+}
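Switching from the pe.Err != syscall.EBUSY comparison to the errors.Is-based IsEBUSY matters once errors are wrapped. A small demonstration of the difference:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Wrapping means a direct comparison against syscall.EBUSY no longer
	// matches, while errors.Is unwraps both the *os.PathError and fmt's %w
	// chain — which is why the rm.go loop above switched to IsEBUSY.
	err := fmt.Errorf("while removing /some/dir: %w",
		&os.PathError{Op: "unlinkat", Path: "/some/dir", Err: syscall.EBUSY})

	fmt.Println(err == syscall.EBUSY)          // false: the error is wrapped
	fmt.Println(errors.Is(err, syscall.EBUSY)) // true: the chain is unwrapped
}
```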
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
index 23e9b207c..f4d8692cd 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
@@ -120,3 +120,8 @@ func HasWin32KSupport() bool {
// APIs.
return ntuserApiset.Load() == nil
}
+
+// IsEBUSY checks if the specified error is EBUSY.
+func IsEBUSY(err error) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go
index 5a10eda5a..9b02a1887 100644
--- a/vendor/github.com/containers/storage/pkg/system/umask.go
+++ b/vendor/github.com/containers/storage/pkg/system/umask.go
@@ -1,4 +1,4 @@
-// +build !windows
+//go:build !windows
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
index 13f1de176..c0b69ab1b 100644
--- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
@@ -1,4 +1,4 @@
-// +build windows
+//go:build windows
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
index 6a7752437..edc588a63 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -10,13 +10,14 @@ import (
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
var _path *byte
_path, err := unix.BytePtrFromString(path)
if err != nil {
return err
}
-
- if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
return err
}
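The replacement syscall is utimensat(2) with AT_SYMLINK_NOFOLLOW, for which x/sys/unix also offers a typed wrapper. A sketch of the equivalent call (the vendored code keeps the raw Syscall6; the symlink path here is hypothetical):

```go
//go:build freebsd || linux

package main

import (
	"time"

	"golang.org/x/sys/unix"
)

// touchSymlink updates a symlink's own atime/mtime without following it,
// via the libc-level wrapper for the same utimensat(2) call.
func touchSymlink(path string, t time.Time) error {
	ts := unix.NsecToTimespec(t.UnixNano())
	return unix.UtimesNanoAt(unix.AT_FDCWD, path, []unix.Timespec{ts, ts}, unix.AT_SYMLINK_NOFOLLOW)
}

func main() {
	if err := touchSymlink("/tmp/example-link", time.Now()); err != nil { // hypothetical link
		panic(err)
	}
}
```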
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
index 139714544..b6c36339d 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd
+//go:build !linux && !freebsd
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
new file mode 100644
index 000000000..75275b964
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
@@ -0,0 +1,84 @@
+package system
+
+import (
+ "bytes"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // Value is larger than the maximum size allowed
+ E2BIG unix.Errno = unix.E2BIG
+
+ // Operation not supported
+ EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// Returns a []byte slice if the xattr is set and nil otherwise.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+
+ switch {
+ case errno == unix.ENOATTR:
+ return nil, nil
+ case errno != nil:
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ if err := unix.Lsetxattr(path, attr, data, flags); err != nil {
+ return &os.PathError{Op: "lsetxattr", Path: path, Err: err}
+ }
+
+ return nil
+}
+
+// Llistxattr lists extended attributes associated with the given path
+// in the file system.
+func Llistxattr(path string) ([]string, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Llistxattr(path, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Llistxattr(path, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ dest = make([]byte, sz)
+ sz, errno = unix.Llistxattr(path, dest)
+ }
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ var attrs []string
+ for _, token := range bytes.Split(dest[:sz], []byte{0}) {
+ if len(token) > 0 {
+ attrs = append(attrs, string(token))
+ }
+ }
+
+ return attrs, nil
+}
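Both the Darwin and Linux implementations share the grow-and-retry pattern around ERANGE and report a missing attribute as (nil, nil) rather than an error. A caller-side sketch; the attribute name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// Lgetxattr returns (nil, nil) when the attribute is simply absent
	// (ENOATTR), so callers can distinguish "not set" from a real error
	// without inspecting errno themselves.
	val, err := system.Lgetxattr("/tmp", "user.example") // hypothetical attribute
	if err != nil {
		panic(err)
	}
	if val == nil {
		fmt.Println("attribute not set")
		return
	}
	fmt.Printf("value: %q\n", val)
}
```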
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index 10355848b..6b47c4e71 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -13,6 +13,9 @@ const (
// Operation not supported
EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+
+ // Value is too small or too large for maximum size allowed
+ EOVERFLOW unix.Errno = unix.EOVERFLOW
)
// Lgetxattr retrieves the value of the extended attribute identified by attr
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index bc8b8e3a5..0ab61f3de 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux && !darwin
package system
@@ -10,6 +10,9 @@ const (
// Operation not supported
EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+
+ // Value is too small or too large for maximum size allowed
+ EOVERFLOW syscall.Errno = syscall.Errno(0)
)
// Lgetxattr is not supported on platforms other than linux.
diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
index 26cd8504c..674e0a0ba 100644
--- a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
+++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
@@ -34,7 +34,7 @@ func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
}
// Make sure to avoid writes after the reader has been closed.
if err := reader.Close(); err != nil {
- logrus.Errorf("error closing tarlogger reader: %v", err)
+ logrus.Errorf("Closing tarlogger reader: %v", err)
}
// Unblock the Close().
t.closeMutex.Unlock()
diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
index 74776e65e..c14a5cc4d 100644
--- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
+++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
@@ -9,12 +9,12 @@ import (
"strings"
"sync"
- "github.com/tchap/go-patricia/patricia"
+ "github.com/tchap/go-patricia/v2/patricia"
)
var (
// ErrEmptyPrefix is an error returned if the prefix was empty.
- ErrEmptyPrefix = errors.New("Prefix can't be empty")
+ ErrEmptyPrefix = errors.New("prefix can't be empty")
// ErrIllegalChar is returned when a space is in the ID
ErrIllegalChar = errors.New("illegal character: ' '")
@@ -25,7 +25,7 @@ var (
// ErrAmbiguousPrefix is returned if the prefix was ambiguous
// (multiple ids for the prefix).
-type ErrAmbiguousPrefix struct {
+type ErrAmbiguousPrefix struct { //nolint: errname
prefix string
}
@@ -42,6 +42,7 @@ type TruncIndex struct {
}
// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+// Invalid IDs are _silently_ ignored.
func NewTruncIndex(ids []string) (idx *TruncIndex) {
idx = &TruncIndex{
ids: make(map[string]struct{}),
@@ -51,7 +52,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) {
trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
}
for _, id := range ids {
- idx.addID(id)
+ _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem.
}
return
}
@@ -101,9 +102,7 @@ func (idx *TruncIndex) Get(s string) (string, error) {
if s == "" {
return "", ErrEmptyPrefix
}
- var (
- id string
- )
+ var id string
subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
if id != "" {
// we haven't found the ID if there are two or more IDs
@@ -132,7 +131,8 @@ func (idx *TruncIndex) Get(s string) (string, error) {
func (idx *TruncIndex) Iterate(handler func(id string)) {
idx.Lock()
defer idx.Unlock()
- idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+ // Ignore the error from Visit: it can only fail if the provided visitor fails, and ours never does.
+ _ = idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
handler(string(prefix))
return nil
})
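A short sketch of the TruncIndex behavior the hunks above touch: ambiguous prefixes fail with ErrAmbiguousPrefix, unique ones resolve to the full ID (the IDs below are made up):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"d1e39ff3a0c51a81852ab570f1e9a63f9f0e0f28b49ad9e6bd0f1b3c4e6d7a8f", // hypothetical IDs
		"d1f0a9b8c7d6e5f4a3b2c1d0e9f8a7b6c5d4e3f2a1b0c9d8e7f6a5b4c3d2e1f0",
	})

	// A prefix shared by both IDs is ambiguous.
	if _, err := idx.Get("d1"); err != nil {
		fmt.Println("ambiguous:", err)
	}

	// A longer, unique prefix resolves to the full ID.
	id, err := idx.Get("d1e")
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved:", id)
}
```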
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
index 4f441c32c..14aaeddcf 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
@@ -1,4 +1,4 @@
-// +build linux,cgo
+//go:build linux && cgo
package unshare
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
index a5005403a..f970935b5 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
@@ -1,4 +1,4 @@
-// +build linux,!cgo
+//go:build linux && !cgo
package unshare
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
index c0e359b27..a2800654f 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -1,4 +1,4 @@
-#ifndef UNSHARE_NO_CODE_AT_ALL
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__)
#define _GNU_SOURCE
#include
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#include
#include
#include
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index 53cfeb0ec..00f397f35 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -5,19 +5,12 @@ import (
"os"
"os/user"
"sync"
-
- "github.com/pkg/errors"
- "github.com/syndtr/gocapability/capability"
)
var (
homeDirOnce sync.Once
homeDirErr error
homeDir string
-
- hasCapSysAdminOnce sync.Once
- hasCapSysAdminRet bool
- hasCapSysAdminErr error
)
// HomeDir returns the home directory for the current user.
@@ -27,7 +20,7 @@ func HomeDir() (string, error) {
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID()))
if err != nil {
- homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory")
+ homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err)
return
}
homeDir, homeDirErr = usr.HomeDir, nil
@@ -37,20 +30,3 @@ func HomeDir() (string, error) {
})
return homeDir, homeDirErr
}
-
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
-func HasCapSysAdmin() (bool, error) {
- hasCapSysAdminOnce.Do(func() {
- currentCaps, err := capability.NewPid2(0)
- if err != nil {
- hasCapSysAdminErr = err
- return
- }
- if err = currentCaps.Load(); err != nil {
- hasCapSysAdminErr = err
- return
- }
- hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
- })
- return hasCapSysAdminRet, hasCapSysAdminErr
-}
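HasCapSysAdmin is not dropped here; it moves into the per-platform files (it is re-added in unshare_linux.go and unshare_unsupported.go later in this diff), so the caller-facing API is unchanged:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// Same call as before the refactor; the implementation is now selected
	// by build tags instead of living in the generic unshare.go.
	ok, err := unshare.HasCapSysAdmin()
	if err != nil {
		panic(err)
	}
	fmt.Println("CAP_SYS_ADMIN effective:", ok)
}
```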
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
index b3f8099f6..f575fba2e 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
@@ -1,10 +1,10 @@
-// +build linux,cgo,!gccgo
+//go:build (linux && cgo && !gccgo) || (freebsd && cgo)
package unshare
// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
-// void __attribute__((constructor)) init(void) {
+// static void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
new file mode 100644
index 000000000..5d0a7a683
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
@@ -0,0 +1,58 @@
+//go:build darwin
+
+package unshare
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ // UsernsEnvName is the environment variable, if set indicates in rootless mode
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+ return true
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+ return os.Getuid()
+}
+
+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+ return os.Getgid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+ return append(os.Environ(), UsernsEnvName+"=")
+}
+
+// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+ return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+ uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ return uid, gid, nil
+}
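ParseIDMappings consumes container-ID:host-ID:size triples, one mapping per string. A usage sketch with example values:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// Each triple is container-ID:host-ID:size; these values are examples.
	uidmap, gidmap, err := unshare.ParseIDMappings(
		[]string{"0:100000:65536"},
		[]string{"0:100000:65536"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid map: %+v\ngid map: %+v\n", uidmap, gidmap)
}
```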
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
new file mode 100644
index 000000000..0b2f17886
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
@@ -0,0 +1,76 @@
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__)
+
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int _containers_unshare_parse_envint(const char *envname) {
+ char *p, *q;
+ long l;
+
+ p = getenv(envname);
+ if (p == NULL) {
+ return -1;
+ }
+ q = NULL;
+ l = strtol(p, &q, 10);
+ if ((q == NULL) || (*q != '\0')) {
+ fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+ _exit(1);
+ }
+ unsetenv(envname);
+ return l;
+}
+
+void _containers_unshare(void)
+{
+ int pidfd, continuefd, n, pgrp, sid, ctty;
+ char buf[2048];
+
+ pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
+ if (pidfd != -1) {
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
+ size_t size = write(pidfd, buf, strlen(buf));
+ if (size != strlen(buf)) {
+ fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
+ _exit(1);
+ }
+ close(pidfd);
+ }
+ continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
+ if (continuefd != -1) {
+ n = read(continuefd, buf, sizeof(buf));
+ if (n > 0) {
+ fprintf(stderr, "Error: %.*s\n", n, buf);
+ _exit(1);
+ }
+ close(continuefd);
+ }
+ sid = _containers_unshare_parse_envint("_Containers-setsid");
+ if (sid == 1) {
+ if (setsid() == -1) {
+ fprintf(stderr, "Error during setsid: %m\n");
+ _exit(1);
+ }
+ }
+ pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
+ if (pgrp == 1) {
+ if (setpgrp(0, 0) == -1) {
+ fprintf(stderr, "Error during setpgrp: %m\n");
+ _exit(1);
+ }
+ }
+ ctty = _containers_unshare_parse_envint("_Containers-ctty");
+ if (ctty != -1) {
+ if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
+ fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
+ _exit(1);
+ }
+ }
+}
+
+#endif
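The C constructor runs before the Go runtime spins up and learns its pipes purely from environment variables: ExtraFiles[i] becomes fd 3+i in the child, so the parent only has to export the number. A Go sketch of that handoff, with /bin/sh standing in for the re-executed child:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	cmd := exec.Command("/bin/sh", "-c", "echo hello >&3")
	fd := len(cmd.ExtraFiles) + 3 // same computation Cmd.Start uses below
	cmd.ExtraFiles = append(cmd.ExtraFiles, w)
	// The env variable is what _containers_unshare_parse_envint above would
	// read; sh ignores it, this only demonstrates the fd numbering.
	cmd.Env = append(os.Environ(), fmt.Sprintf("_Containers-pid-pipe=%d", fd))
	if err := cmd.Run(); err != nil {
		panic(err)
	}
	w.Close() // drop our copy so the read below cannot block forever

	buf := make([]byte, 64)
	n, err := r.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read from fd %d: %s", fd, buf[:n])
}
```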
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
new file mode 100644
index 000000000..37a87fa5b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -0,0 +1,178 @@
+//go:build freebsd
+
+package unshare
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+)
+
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(),
+// and one day might handle setting ID maps and other related settings
+// by triggering initialization code in the child.
+type Cmd struct {
+ *exec.Cmd
+ Setsid bool
+ Setpgrp bool
+ Ctty *os.File
+ Hook func(pid int) error
+}
+
+// Command creates a new Cmd which can be customized.
+func Command(args ...string) *Cmd {
+ cmd := reexec.Command(args...)
+ return &Cmd{
+ Cmd: cmd,
+ }
+}
+
+func (c *Cmd) Start() error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Set environment variables to tell the child to synchronize its startup.
+ if c.Env == nil {
+ c.Env = os.Environ()
+ }
+
+ // Create the pipe for reading the child's PID.
+ pidRead, pidWrite, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("creating pid pipe: %w", err)
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, pidWrite)
+
+ // Create the pipe for letting the child know to proceed.
+ continueRead, continueWrite, err := os.Pipe()
+ if err != nil {
+ pidRead.Close()
+ pidWrite.Close()
+ return fmt.Errorf("creating continue read/write pipe: %w", err)
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, continueRead)
+
+ // Pass along other instructions.
+ if c.Setsid {
+ c.Env = append(c.Env, "_Containers-setsid=1")
+ }
+ if c.Setpgrp {
+ c.Env = append(c.Env, "_Containers-setpgrp=1")
+ }
+ if c.Ctty != nil {
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
+ }
+
+ // Make sure we clean up our pipes.
+ defer func() {
+ if pidRead != nil {
+ pidRead.Close()
+ }
+ if pidWrite != nil {
+ pidWrite.Close()
+ }
+ if continueRead != nil {
+ continueRead.Close()
+ }
+ if continueWrite != nil {
+ continueWrite.Close()
+ }
+ }()
+
+ // Start the new process.
+ err = c.Cmd.Start()
+ if err != nil {
+ return err
+ }
+
+ // Close the ends of the pipes that the parent doesn't need.
+ continueRead.Close()
+ continueRead = nil
+ pidWrite.Close()
+ pidWrite = nil
+
+ // Read the child's PID from the pipe.
+ pidString := ""
+ b := new(bytes.Buffer)
+ if _, err := io.Copy(b, pidRead); err != nil {
+ return fmt.Errorf("reading child PID: %w", err)
+ }
+ pidString = b.String()
+ pid, err := strconv.Atoi(pidString)
+ if err != nil {
+ fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
+ }
+
+ // Run any additional setup that we want to do before the child starts running proper.
+ if c.Hook != nil {
+ if err = c.Hook(pid); err != nil {
+ fmt.Fprintf(continueWrite, "hook error: %v", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Cmd) Run() error {
+ if err := c.Start(); err != nil {
+ return err
+ }
+ return c.Wait()
+}
+
+func (c *Cmd) CombinedOutput() ([]byte, error) {
+ return nil, errors.New("unshare: CombinedOutput() not implemented")
+}
+
+func (c *Cmd) Output() ([]byte, error) {
+ return nil, errors.New("unshare: Output() not implemented")
+}
+
+type Runnable interface {
+ Run() error
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable, cleanup func()) {
+ exit := func(status int) {
+ if cleanup != nil {
+ cleanup()
+ }
+ os.Exit(status)
+ }
+ if err := cmd.Run(); err != nil {
+ if exitError, ok := err.(*exec.ExitError); ok {
+ if exitError.ProcessState.Exited() {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ logrus.Debugf("%v", exitError)
+ exit(waitStatus.ExitStatus())
+ }
+ if waitStatus.Signaled() {
+ logrus.Debugf("%v", exitError)
+ exit(int(waitStatus.Signal()) + 128)
+ }
+ }
+ }
+ }
+ logrus.Errorf("%v", err)
+ logrus.Errorf("(Unable to determine exit status)")
+ exit(1)
+ }
+ exit(0)
+}
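A hypothetical end-to-end use of the new FreeBSD Cmd: register a re-exec entry point, start the child through unshare.Command, and let ExecRunnable propagate its exit status (the handler name is made up for this sketch):

```go
//go:build freebsd

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
	"github.com/containers/storage/pkg/unshare"
)

func init() {
	// Hypothetical re-exec entry point; unshare.Command re-invokes the
	// current binary with this name as argv[0].
	reexec.Register("demo-child", func() {
		fmt.Println("running in the re-executed child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were the re-executed child
	}
	cmd := unshare.Command("demo-child")
	cmd.Setsid = true
	cmd.Hook = func(pid int) error { // runs in the parent once the child PID is known
		fmt.Fprintf(os.Stderr, "child pid: %d\n", pid)
		return nil
	}
	// ExecRunnable exits with the child's status and never returns.
	unshare.ExecRunnable(cmd, nil)
}
```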
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
index 2f95da7d8..818983474 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
@@ -1,10 +1,10 @@
-// +build linux,cgo,gccgo
+//go:build linux && cgo && gccgo
package unshare
// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
-// void __attribute__((constructor)) init(void) {
+// static void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 96b857543..b45a6819a 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -1,14 +1,16 @@
-// +build linux
+//go:build linux
package unshare
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"os"
"os/exec"
+ "os/signal"
"os/user"
"runtime"
"strconv"
@@ -18,10 +20,9 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
+ "github.com/moby/sys/capability"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/syndtr/gocapability/capability"
)
// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
@@ -31,9 +32,9 @@ type Cmd struct {
*exec.Cmd
UnshareFlags int
UseNewuidmap bool
- UidMappings []specs.LinuxIDMapping // nolint: golint
+ UidMappings []specs.LinuxIDMapping // nolint: revive,golint
UseNewgidmap bool
- GidMappings []specs.LinuxIDMapping // nolint: golint
+ GidMappings []specs.LinuxIDMapping // nolint: revive,golint
GidMappingsEnableSetgroups bool
Setsid bool
Setpgrp bool
@@ -75,6 +76,28 @@ func getRootlessGID() int {
return os.Getegid()
}
+// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the
+// matching file capability
+func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return false, err
+ }
+
+ mode := info.Mode()
+ if mode&modeid == modeid {
+ return true, nil
+ }
+ cap, err := capability.NewFile2(path)
+ if err != nil {
+ return false, err
+ }
+ if err := cap.Load(); err != nil {
+ return false, err
+ }
+ return cap.Get(capability.EFFECTIVE, capid), nil
+}
+
func (c *Cmd) Start() error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@@ -95,7 +118,7 @@ func (c *Cmd) Start() error {
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
@@ -105,7 +128,7 @@ func (c *Cmd) Start() error {
if err != nil {
pidRead.Close()
pidWrite.Close()
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating continue read/write pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, continueRead)
@@ -151,16 +174,15 @@ func (c *Cmd) Start() error {
pidWrite = nil
// Read the child's PID from the pipe.
- pidString := ""
b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil {
- return errors.Wrapf(err, "error reading child PID")
+ return fmt.Errorf("reading child PID: %w", err)
}
- pidString = b.String()
+ pidString := b.String()
pid, err := strconv.Atoi(pidString)
if err != nil {
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
- return errors.Wrapf(err, "error parsing PID %q", pidString)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
}
pidString = fmt.Sprintf("%d", pid)
@@ -170,18 +192,18 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if c.GidMappingsEnableSetgroups {
if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
- return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err)
}
} else {
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
- return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err)
}
}
@@ -189,7 +211,7 @@ func (c *Cmd) Start() error {
uidmap, gidmap, err := GetHostIDMappings("")
if err != nil {
fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
- return errors.Wrapf(err, "error reading ID mappings in parent")
+ return fmt.Errorf("reading ID mappings in parent: %w", err)
}
if len(c.UidMappings) == 0 {
c.UidMappings = uidmap
@@ -214,16 +236,27 @@ func (c *Cmd) Start() error {
gidmapSet := false
// Set the GID map.
if c.UseNewgidmap {
- cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
+ path, err := exec.LookPath("newgidmap")
+ if err != nil {
+ return fmt.Errorf("finding newgidmap: %w", err)
+ }
+ cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
g.Reset()
cmd.Stdout = g
cmd.Stderr = g
- err := cmd.Run()
- if err == nil {
+ if err := cmd.Run(); err == nil {
gidmapSet = true
} else {
- logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("running newgidmap: %v: %s", err, g.String())
+ isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID)
+ if err != nil {
+ logrus.Warnf("Failed to check for setgid on %s: %v", path, err)
+ } else {
+ if !isSetgid {
+ logrus.Warnf("%s should be setgid or have filecaps setgid", path)
+ }
+ }
+ logrus.Warnf("Falling back to single mapping")
g.Reset()
g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
}
@@ -233,23 +266,23 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err)
}
}
gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
- fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+ fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err)
+ return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err)
}
defer gidmap.Close()
if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
- fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+ fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+ return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err)
}
}
}
@@ -261,18 +294,30 @@ func (c *Cmd) Start() error {
fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
uidmapSet := false
- // Set the GID map.
+ // Set the UID map.
if c.UseNewuidmap {
- cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+ path, err := exec.LookPath("newuidmap")
+ if err != nil {
+ return fmt.Errorf("finding newuidmap: %w", err)
+ }
+ cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
u.Reset()
cmd.Stdout = u
cmd.Stderr = u
- err := cmd.Run()
- if err == nil {
+ if err := cmd.Run(); err == nil {
uidmapSet = true
} else {
- logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("Error running newuidmap: %v: %s", err, u.String())
+ isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID)
+ if err != nil {
+ logrus.Warnf("Failed to check for setuid on %s: %v", path, err)
+ } else {
+ if !isSetuid {
+ logrus.Warnf("%s should be setuid or have filecaps setuid", path)
+ }
+ }
+
+ logrus.Warnf("Falling back to single mapping")
u.Reset()
u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
}
@@ -281,12 +326,12 @@ func (c *Cmd) Start() error {
uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
+ return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err)
}
defer uidmap.Close()
if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
+ return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err)
}
}
}
@@ -296,12 +341,12 @@ func (c *Cmd) Start() error {
oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
+ return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err)
}
defer oomScoreAdj.Close()
if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
- return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
+ return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err)
}
}
// Run any additional setup that we want to do before the child starts running proper.
@@ -340,10 +385,47 @@ const (
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)
+// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped.
+func hasFullUsersMappings() (bool, error) {
+ content, err := os.ReadFile("/proc/self/uid_map")
+ if err != nil {
+ return false, err
+ }
+ // The kernel rejects attempts to create mappings where either starting
+ // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 .
+ // So, if the uid_map contains 4294967295, the entire IDs space is available in the
+ // user namespace, so it is likely the initial user namespace.
+ return bytes.Contains(content, []byte("4294967295")), nil
+}
+
+var (
+ hasCapSysAdminOnce sync.Once
+ hasCapSysAdminRet bool
+ hasCapSysAdminErr error
+)
+
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != ""
+ if !isRootless {
+ hasCapSysAdmin, err := HasCapSysAdmin()
+ if err != nil {
+ logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process")
+ }
+ if err == nil && !hasCapSysAdmin {
+ isRootless = true
+ }
+ }
+ if !isRootless {
+ hasMappings, err := hasFullUsersMappings()
+ if err != nil {
+ logrus.Warnf("Failed to read current user namespace mappings")
+ }
+ if err == nil && !hasMappings {
+ isRootless = true
+ }
+ }
})
return isRootless
}
@@ -358,6 +440,16 @@ func GetRootlessUID() int {
return os.Getuid()
}
+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+ gidEnv := getenv("_CONTAINERS_ROOTLESS_GID")
+ if gidEnv != "" {
+ u, _ := strconv.Atoi(gidEnv)
+ return u
+ }
+ return os.Getgid()
+}
+
// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=done")
@@ -367,7 +459,7 @@ type Runnable interface {
Run() error
}
-func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
+func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname
if err != nil {
if format != "" {
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
@@ -381,7 +473,7 @@ func bailOnError(err error, format string, a ...interface{}) { // nolint: golint
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we've already been through this once, no need to try again.
- if os.Geteuid() == 0 && IsRootless() {
+ if os.Geteuid() == 0 && GetRootlessUID() > 0 {
return
}
@@ -407,7 +499,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// ID and a range size.
uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
if err != nil {
- logrus.Warnf("error reading allowed ID mappings: %v", err)
+ logrus.Warnf("Reading allowed ID mappings: %v", err)
}
if len(uidmap) == 0 {
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@@ -433,14 +525,17 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
} else {
// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
// to use unshare(), so don't bother creating a new user namespace at this point.
- capabilities, err := capability.NewPid(0)
- bailOnError(err, "error reading the current capabilities sets")
+ capabilities, err := capability.NewPid2(0)
+ bailOnError(err, "Initializing a new Capabilities object of pid 0")
+ err = capabilities.Load()
+ bailOnError(err, "Reading the current capabilities sets")
+
if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
return
}
// Read the set of ID mappings that we're currently using.
uidmap, gidmap, err = GetHostIDMappings("")
- bailOnError(err, "error reading current ID mappings")
+ bailOnError(err, "Reading current ID mappings")
// Just reuse them.
for i := range uidmap {
uidmap[i].HostID = uidmap[i].ContainerID
@@ -463,7 +558,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
- logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+ logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err)
os.Exit(1)
}
}
@@ -483,7 +578,36 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
cmd.GidMappingsEnableSetgroups = true
// Finish up.
- logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+ logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+
+ // Forward SIGHUP, SIGINT, and SIGTERM to our child process.
+ interrupted := make(chan os.Signal, 100)
+ defer func() {
+ signal.Stop(interrupted)
+ close(interrupted)
+ }()
+ cmd.Hook = func(int) error {
+ go func() {
+ for receivedSignal := range interrupted {
+ if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil {
+ logrus.Warnf(
+ "Failed to send a signal '%d' to the Process (PID: %d): %v",
+ receivedSignal, cmd.Cmd.Process.Pid, err,
+ )
+ }
+ }
+ }()
+ return nil
+ }
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+ // Make sure our child process gets SIGKILLed if we exit, for whatever
+ // reason, before it does.
+ if cmd.Cmd.SysProcAttr == nil {
+ cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+
ExecRunnable(cmd, nil)
}
@@ -497,22 +621,22 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
os.Exit(status)
}
if err := cmd.Run(); err != nil {
- if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ProcessState.Exited() {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {
- logrus.Errorf("%v", exitError)
+ logrus.Debugf("%v", exitError)
exit(waitStatus.ExitStatus())
}
if waitStatus.Signaled() {
- logrus.Errorf("%v", exitError)
+ logrus.Debugf("%v", exitError)
exit(int(waitStatus.Signal()) + 128)
}
}
}
}
logrus.Errorf("%v", err)
- logrus.Errorf("(unable to determine exit status)")
+ logrus.Errorf("(Unable to determine exit status)")
exit(1)
}
exit(0)
@@ -523,7 +647,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
+ return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -531,19 +655,19 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
line := scanner.Text()
fields := strings.Fields(line)
if len(fields) != 3 {
- return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
+ return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
}
cid, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
+ return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err)
}
hid, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
+ return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err)
}
size, err := strconv.ParseUint(fields[2], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
+ return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err)
}
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
}
@@ -571,7 +695,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+ return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
@@ -603,3 +727,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
}
return uid, gid, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ hasCapSysAdminOnce.Do(func() {
+ currentCaps, err := capability.NewPid2(0)
+ if err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ if err = currentCaps.Load(); err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
+ })
+ return hasCapSysAdminRet, hasCapSysAdminErr
+}
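The heuristic behind hasFullUsersMappings can be checked by hand: in the initial user namespace /proc/self/uid_map maps the entire 32-bit ID space, and since the kernel refuses mappings that start at (u32)-1, the literal 4294967295 only ever appears there. A sketch:

```go
//go:build linux

package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	// In the initial user namespace this prints a single line like
	// "0 0 4294967295"; inside a rootless namespace the ranges are smaller.
	content, err := os.ReadFile("/proc/self/uid_map")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(content))
	fmt.Println("full mapping:", bytes.Contains(content, []byte("4294967295")))
}
```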
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index bf4d567b8..05706b8fe 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+//go:build !linux && !darwin
package unshare
@@ -16,7 +16,7 @@ const (
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
- return false
+ return os.Getuid() != 0
}
// GetRootlessUID returns the UID of the user in the parent userNS
@@ -24,6 +24,11 @@ func GetRootlessUID() int {
return os.Getuid()
}
+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+ return os.Getgid()
+}
+
// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=")
@@ -43,3 +48,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
return nil, nil, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ return os.Geteuid() == 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
index d5f2d22a8..ae2869d74 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -1,4 +1,4 @@
-// +build !linux,cgo
+//go:build cgo && !(linux || freebsd)
package unshare
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index e70f4f018..962325233 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -1,22 +1,51 @@
-# This file is is the configuration file for all tools
-# that use the containers/storage library.
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/containers/storage.conf
+# /etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (if XDG_CONFIG_HOME is set)
# See man 5 containers-storage.conf for more information
-# The "container storage" table contains all of the server options.
+# The "storage" table contains all of the server options.
[storage]
-# Default Storage Driver, Must be set for proper operation.
+# Default storage driver, must be set for proper operation.
driver = "overlay"
# Temporary storage location
runroot = "/run/containers/storage"
+# Priority list for the storage drivers that will be tested one
+# after the other to pick the storage driver if it is not defined.
+# driver_priority = ["overlay", "btrfs"]
+
# Primary Read/Write location of container storage
+# When changing the graphroot location on an SELinux system, you must
+# ensure the labeling matches the default location's labels with the
+# following commands:
+# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
+# restorecon -R -v /NEWSTORAGEPATH
graphroot = "/var/lib/containers/storage"
+# Optional alternate location of image store if a location separate from the
+# container store is required. If set, it must be different than graphroot.
+# imagestore = ""
+
+
# Storage path for rootless users
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
+# Transient store mode makes all container metadata be saved in temporary storage
+# (i.e. runroot above). This is faster, but doesn't persist across reboots.
+# Additional garbage collection must also be performed at boot-time, so this
+# option should remain disabled in most configurations.
+# transient_store = true
+
[storage.options]
# Storage options to be passed to underlying storage drivers
@@ -25,26 +54,31 @@ graphroot = "/var/lib/containers/storage"
additionalimagestores = [
]
-# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
-# a container, to the UIDs/GIDs as they should appear outside of the container,
-# and the length of the range of UIDs/GIDs. Additional mapped sets can be
-# listed and will be heeded by libraries, but there are limits to the number of
-# mappings which the kernel will allow when you later attempt to run a
-# container.
-#
-# remap-uids = 0:1668442479:65536
-# remap-gids = 0:1668442479:65536
-
-# Remap-User/Group is a user name which can be used to look up one or more UID/GID
-# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
-# with an in-container ID of 0 and then a host-level ID taken from the lowest
-# range that matches the specified name, and using the length of that range.
-# Additional ranges are then assigned, using the ranges which specify the
-# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
-# until all of the entries have been used for maps.
-#
-# remap-user = "containers"
-# remap-group = "containers"
+# Options controlling how storage is populated when pulling images.
+[storage.options.pull_options]
+# Enable the "zstd:chunked" feature, which allows partial pulls, reusing
+# content that already exists on the system. This is disabled by default,
+# and must be explicitly enabled to be used. For more on zstd:chunked, see
+# https://github.com/containers/storage/blob/main/docs/containers-storage-zstd-chunked.md
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# enable_partial_images = "false"
+
+# Tells containers/storage to use hard links rather than create new files in
+# the image, if an identical file already exists in storage.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# use_hard_links = "false"
+
+# Path to an ostree repository that might have
+# previously pulled content which can be used when attempting to avoid
+# pulling content from the container registry.
+# ostree_repos=""
+
+# If set to "true", containers/storage will convert images that are
+# not already in zstd:chunked format to that format before processing
+# in order to take advantage of local deduplication and hard linking.
+# It is an expensive operation so it is not enabled by default.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# convert_images = "false"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
@@ -57,7 +91,7 @@ additionalimagestores = [
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
# auto-userns-min-size=1024
#
-# Auto-userns-max-size is the minimum size for a user namespace created automatically.
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
# auto-userns-max-size=65536
[storage.options.overlay]
@@ -67,8 +101,12 @@ additionalimagestores = [
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
#ignore_chown_errors = "false"
+# Inodes is used to set the maximum number of inodes in the container image.
+# inodes = ""
+
# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
@@ -77,8 +115,13 @@ additionalimagestores = [
mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
# skip_mount_home = "false"
+# Set to use composefs to mount data layers with overlay.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# use_composefs = "false"
+
# Size is used to set a maximum size of the container image.
# size = ""
@@ -110,83 +153,7 @@ mountopt = "nodev"
# future. When "force_mask" is set the original permission mask is stored in
# the "user.containers.override_stat" xattr and the "mount_program" option must
# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
-# extended attribute permissions to processes within containers rather then the
+# extended attribute permissions to processes within containers rather than the
# "force_mask" permissions.
#
# force_mask = ""
-
-[storage.options.thinpool]
-# Storage Options for thinpool
-
-# autoextend_percent determines the amount by which pool needs to be
-# grown. This is specified in terms of % of pool size. So a value of 20 means
-# that when threshold is hit, pool will be grown by 20% of existing
-# pool size.
-# autoextend_percent = "20"
-
-# autoextend_threshold determines the pool extension threshold in terms
-# of percentage of pool size. For example, if threshold is 60, that means when
-# pool is 60% full, threshold has been hit.
-# autoextend_threshold = "80"
-
-# basesize specifies the size to use when creating the base device, which
-# limits the size of images and containers.
-# basesize = "10G"
-
-# blocksize specifies a custom blocksize to use for the thin pool.
-# blocksize="64k"
-
-# directlvm_device specifies a custom block storage device to use for the
-# thin pool. Required if you setup devicemapper.
-# directlvm_device = ""
-
-# directlvm_device_force wipes device even if device already has a filesystem.
-# directlvm_device_force = "True"
-
-# fs specifies the filesystem type to use for the base device.
-# fs="xfs"
-
-# log_level sets the log level of devicemapper.
-# 0: LogLevelSuppress 0 (Default)
-# 2: LogLevelFatal
-# 3: LogLevelErr
-# 4: LogLevelWarn
-# 5: LogLevelNotice
-# 6: LogLevelInfo
-# 7: LogLevelDebug
-# log_level = "7"
-
-# min_free_space specifies the min free space percent in a thin pool require for
-# new device creation to succeed. Valid values are from 0% - 99%.
-# Value 0% disables
-# min_free_space = "10%"
-
-# mkfsarg specifies extra mkfs arguments to be used when creating the base
-# device.
-# mkfsarg = ""
-
-# metadata_size is used to set the `pvcreate --metadatasize` options when
-# creating thin devices. Default is 128k
-# metadata_size = ""
-
-# Size is used to set a maximum size of the container image.
-# size = ""
-
-# use_deferred_removal marks devicemapper block device for deferred removal.
-# If the thinpool is in use when the driver attempts to remove it, the driver
-# tells the kernel to remove it as soon as possible. Note this does not free
-# up the disk space, use deferred deletion to fully remove the thinpool.
-# use_deferred_removal = "True"
-
-# use_deferred_deletion marks thinpool device for deferred deletion.
-# If the device is busy when the driver attempts to delete it, the driver
-# will attempt to delete device every 30 seconds until successful.
-# If the program using the driver exits, the driver will continue attempting
-# to cleanup the next time the driver is used. Deferred deletion permanently
-# deletes the device and all data stored in device will be lost.
-# use_deferred_deletion = "True"
-
-# xfs_nospace_max_retries specifies the maximum number of retries XFS should
-# attempt to complete IO when ENOSPC (no space) error is returned by
-# underlying storage device.
-# xfs_nospace_max_retries = "0"
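
The new [storage.options.pull_options] keys are "string bools": quoted "true"/"false" values, not native TOML booleans. A sketch of how a consumer might decode and interpret them with BurntSushi/toml; the struct types here are illustrative, not part of containers/storage:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/BurntSushi/toml"
)

// Illustrative decoding targets, not library types.
type pullOptions struct {
	EnablePartialImages string `toml:"enable_partial_images"`
	UseHardLinks        string `toml:"use_hard_links"`
	ConvertImages       string `toml:"convert_images"`
}

type storageConf struct {
	Storage struct {
		Options struct {
			PullOptions pullOptions `toml:"pull_options"`
		} `toml:"options"`
	} `toml:"storage"`
}

func main() {
	var conf storageConf
	if _, err := toml.DecodeFile("/etc/containers/storage.conf", &conf); err != nil {
		panic(err)
	}
	// A "string bool" is parsed from its quoted form; "" means unset.
	if v := conf.Storage.Options.PullOptions.EnablePartialImages; v != "" {
		enabled, err := strconv.ParseBool(v)
		if err != nil {
			panic(err)
		}
		fmt.Println("partial images enabled:", enabled)
	}
}
```
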
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 000000000..5f421e0c1
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,115 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# containers/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/local/share/containers/storage.conf
+# /usr/local/etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default storage driver, must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+# Optional value for image storage location
+# If set, it must be different than graphroot.
+
+# imagestore = ""
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be a comma-separated list.
+additionalimagestores = [
+]
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to create automatically a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set the maximum number of inodes in the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies comma separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Set to use composefs to mount data layers with overlay.
+# use_composefs = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based homedirs.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing container storage
+# with other users. For instance, a store owned by root can be shared
+# with rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other OCTAL Permissions.
+#
+# Note: The force_mask flag is an experimental feature; it could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index d6d547c64..a2d385008 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -1,15 +1,19 @@
package storage
import (
+ _ "embed"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "maps"
"os"
"path/filepath"
"reflect"
+ "slices"
"strings"
"sync"
+ "syscall"
"time"
// register all of the built-in drivers
@@ -20,14 +24,30 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
+ "github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type updateNameOperation int
+
+const (
+ setNames updateNameOperation = iota
+ addNames
+ removeNames
+)
+
+const (
+ volatileFlag = "Volatile"
+ mountLabelFlag = "MountLabel"
+ processLabelFlag = "ProcessLabel"
+ mountOptsFlag = "MountOpts"
)
var (
@@ -35,57 +55,54 @@ var (
storesLock sync.Mutex
)
-// ROFileBasedStore wraps up the methods of the various types of file-based
-// data stores that we implement which are needed for both read-only and
-// read-write files.
-type ROFileBasedStore interface {
- Locker
-
- // Load reloads the contents of the store from disk. It should be called
- // with the lock held.
- Load() error
-
- // ReloadIfChanged reloads the contents of the store from disk if it is changed.
- ReloadIfChanged() error
+// roMetadataStore wraps a method for reading metadata associated with an ID.
+type roMetadataStore interface {
+ // Metadata reads metadata associated with an item with the specified ID.
+ Metadata(id string) (string, error)
}
-// RWFileBasedStore wraps up the methods of various types of file-based data
-// stores that we implement using read-write files.
-type RWFileBasedStore interface {
- // Save saves the contents of the store to disk. It should be called with
- // the lock held, and Touch() should be called afterward before releasing the
- // lock.
- Save() error
+// rwMetadataStore wraps a method for setting metadata associated with an ID.
+type rwMetadataStore interface {
+ // SetMetadata updates the metadata associated with the item with the specified ID.
+ SetMetadata(id, metadata string) error
}
-// FileBasedStore wraps up the common methods of various types of file-based
-// data stores that we implement.
-type FileBasedStore interface {
- ROFileBasedStore
- RWFileBasedStore
+// metadataStore wraps up methods for getting and setting metadata associated with IDs.
+type metadataStore interface {
+ roMetadataStore
+ rwMetadataStore
}
-// ROMetadataStore wraps a method for reading metadata associated with an ID.
-type ROMetadataStore interface {
- // Metadata reads metadata associated with an item with the specified ID.
- Metadata(id string) (string, error)
+// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
+type ApplyStagedLayerOptions struct {
+ ID string // Mandatory
+ ParentLayer string // Optional
+ Names []string // Optional
+ MountLabel string // Optional
+ Writeable bool // Optional
+ LayerOptions *LayerOptions // Optional
+
+ DiffOutput *drivers.DriverWithDifferOutput // Mandatory
+ DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
}
-// RWMetadataStore wraps a method for setting metadata associated with an ID.
-type RWMetadataStore interface {
- // SetMetadata updates the metadata associated with the item with the specified ID.
- SetMetadata(id, metadata string) error
+// MultiListOptions contains options to pass to MultiList
+type MultiListOptions struct {
+ Images bool // if true, Images will be listed in the result
+ Layers bool // if true, layers will be listed in the result
+ Containers bool // if true, containers will be listed in the result
}
-// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
-type MetadataStore interface {
- ROMetadataStore
- RWMetadataStore
+// MultiListResult contains slices of Images, Layers, or Containers listed by the MultiList method
+type MultiListResult struct {
+ Images []Image
+ Layers []Layer
+ Containers []Container
}
-// An ROBigDataStore wraps up the read-only big-data related methods of the
+// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
-type ROBigDataStore interface {
+type roBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated with
// this ID, if it has previously been set.
BigData(id, key string) ([]byte, error)
@@ -103,8 +120,8 @@ type ROBigDataStore interface {
BigDataNames(id string) ([]string, error)
}
-// A RWImageBigDataStore wraps up how we store big-data associated with images.
-type RWImageBigDataStore interface {
+// A rwImageBigDataStore wraps up how we store big-data associated with images.
+type rwImageBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
// Pass github.com/containers/image/manifest.Digest as digestManifest
@@ -112,16 +129,16 @@ type RWImageBigDataStore interface {
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
-// A ContainerBigDataStore wraps up how we store big-data associated with containers.
-type ContainerBigDataStore interface {
- ROBigDataStore
+// A containerBigDataStore wraps up how we store big-data associated with containers.
+type containerBigDataStore interface {
+ roBigDataStore
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
}
-// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers.
-type ROLayerBigDataStore interface {
+// A roLayerBigDataStore wraps up how we store RO big-data associated with layers.
+type roLayerBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
BigData(id, key string) (io.ReadCloser, error)
@@ -131,21 +148,15 @@ type ROLayerBigDataStore interface {
BigDataNames(id string) ([]string, error)
}
-// A RWLayerBigDataStore wraps up how we store big-data associated with layers.
-type RWLayerBigDataStore interface {
+// A rwLayerBigDataStore wraps up how we store big-data associated with layers.
+type rwLayerBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data io.Reader) error
}
-// A LayerBigDataStore wraps up how we store big-data associated with layers.
-type LayerBigDataStore interface {
- ROLayerBigDataStore
- RWLayerBigDataStore
-}
-
-// A FlaggableStore can have flags set and cleared on items which it manages.
-type FlaggableStore interface {
+// A flaggableStore can have flags set and cleared on items which it manages.
+type flaggableStore interface {
// ClearFlag removes a named flag from an item in the store.
ClearFlag(id string, flag string) error
@@ -162,8 +173,11 @@ type Store interface {
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
+ ImageStore() string
+ TransientStore() bool
GraphDriverName() string
GraphOptions() []string
+ PullOptions() map[string]string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
@@ -265,6 +279,8 @@ type Store interface {
// Unmount attempts to unmount an image, given an ID.
// Returns whether or not the layer is still mounted.
+ // WARNING: The return value may already be obsolete by the time it is available
+ // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
UnmountImage(id string, force bool) (bool, error)
// Mount attempts to mount a layer, image, or container for access, and
@@ -283,9 +299,15 @@ type Store interface {
// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path. Returns whether or not the layer is still mounted.
+ // WARNING: The return value may already be obsolete by the time it is available
+ // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
Unmount(id string, force bool) (bool, error)
// Mounted returns number of times the layer has been mounted.
+ //
+ // WARNING: This value might already be obsolete by the time it is returned;
+ // In situations where concurrent mount/unmount attempts can happen, this field
+ // should not be used for any decisions, maybe apart from heuristic user warnings.
Mounted(id string) (int, error)
// Changes returns a summary of the changes which would need to be made
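
Given the warnings above, the Unmount and Mounted results are best confined to diagnostics. A sketch that uses the mount count only for a log message, never for control flow:

```go
package example

import (
	storage "github.com/containers/storage"
	"github.com/sirupsen/logrus"
)

// warnIfMounted uses Mounted() only for a heuristic user warning, as the
// interface comment above suggests; it never gates behavior on the count.
func warnIfMounted(store storage.Store, layerID string) {
	if n, err := store.Mounted(layerID); err == nil && n > 0 {
		logrus.Warnf("layer %s still reports %d mount(s); the count may already be stale", layerID, n)
	}
}
```
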
@@ -319,16 +341,24 @@ type Store interface {
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
- // ApplyDiffer applies a diff to a layer.
+ // ApplyDiffWithDiffer applies a diff to a layer.
+ // It is the caller's responsibility to clean the staging directory if it is not
+ // successfully applied with ApplyStagedLayer.
+ // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release.
+ ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
+ // PrepareStagedLayer applies a diff to a layer.
 // It is the caller's responsibility to clean the staging directory if it is not
- // successfully applied with ApplyDiffFromStagingDirectory.
- ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+ // successfully applied with ApplyStagedLayer.
+ PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
- // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
- ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+ // ApplyStagedLayer combines the functions of creating a layer and using the staging
+ // directory to populate it.
+ // It marks the layer for automatic removal if applying the diff fails for any reason.
+ ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
- // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
- CleanupStagingDirectory(stagingDirectory string) error
+ // CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors.
+ CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
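
The renamed staging methods suggest a three-step flow: PrepareStagedLayer stages the diff, ApplyStagedLayer commits it as a layer, and CleanupStagedLayer releases the staging directory when the apply fails. A sketch under those assumptions (the nil prepare options and the literal layer ID are placeholders, not recommended values):

```go
package example

import (
	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
)

// applyStaged stages a diff, commits it as a layer, and cleans up the
// staging directory on failure, following the interface comments above.
func applyStaged(store storage.Store, parentLayer string, differ graphdriver.Differ) (*storage.Layer, error) {
	// Stage the diff without creating a layer yet.
	out, err := store.PrepareStagedLayer(nil, differ) // nil options: an assumption
	if err != nil {
		return nil, err
	}
	layer, err := store.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          "example-layer-id", // placeholder; callers normally generate one
		ParentLayer: parentLayer,
		DiffOutput:  out,
		DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{},
	})
	if err != nil {
		// Per the docs above, the caller cleans the staging directory
		// when the diff was not successfully applied.
		_ = store.CleanupStagedLayer(out)
		return nil, err
	}
	return layer, nil
}
```
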
@@ -341,6 +371,10 @@ type Store interface {
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+ // LayersByTOCDigest returns a slice of the layers with the
+ // specified TOC digest value recorded for them.
+ LayersByTOCDigest(d digest.Digest) ([]Layer, error)
+
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
@@ -367,8 +401,17 @@ type Store interface {
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
+ // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
+ // AddNames adds the list of names for a layer, image, or container.
+ // Duplicate names are removed from the list automatically.
+ AddNames(id string, names []string) error
+
+ // RemoveNames removes the list of names for a layer, image, or container.
+ // Duplicate names are removed from the list automatically.
+ RemoveNames(id string, names []string) error
+
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
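
The deprecation of SetNames is about read-modify-write races: two concurrent callers that each read the name list and write back a modified copy can silently drop each other's changes. A sketch of the suggested migration, with placeholder names:

```go
package example

import storage "github.com/containers/storage"

// retagImage adds the new name before removing the old one, letting the
// store update the list atomically under its own lock instead of the
// read-modify-write that SetNames would require (names are placeholders).
func retagImage(store storage.Store, id string) error {
	if err := store.AddNames(id, []string{"localhost/foo:latest"}); err != nil {
		return err
	}
	return store.RemoveNames(id, []string{"localhost/foo:old"})
}
```
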
@@ -391,8 +434,20 @@ type Store interface {
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+ // ImageDirectory returns a path of a directory which the caller can
+ // use to store data, specific to the image, which the library does not
+ // directly manage. The directory will be deleted when the image is
+ // deleted.
+ ImageDirectory(id string) (string, error)
+
+ // ImageRunDirectory returns a path of a directory which the caller can
+ // use to store data, specific to the image, which the library does not
+ // directly manage. The directory will be deleted when the host system
+ // is restarted.
+ ImageRunDirectory(id string) (string, error)
+
// ListLayerBigData retrieves a list of the (possibly large) chunks of
- // named data associated with an layer.
+ // named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
// LayerBigData retrieves a (possibly large) chunk of named data
@@ -507,23 +562,44 @@ type Store interface {
// GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
- // LayerFromAdditionalLayerStore searches layers from the additional layer store and
- // returns the object for handling this. Note that this hasn't been stored to this store
- // yet so this needs to be done through PutAs method.
- // Releasing AdditionalLayer handler is caller's responsibility.
+ // LayerFromAdditionalLayerStore searches the additional layer store and returns an object
+ // which can create a layer with the specified TOC digest associated with the specified image
+ // reference. Note that this hasn't been stored to this store yet: the actual creation of
+ // a usable layer is done by calling the returned object's PutAs() method. After creating
+ // a layer, the caller must then call the object's Release() method to free any temporary
+ // resources which were allocated for the object by this method or the object's PutAs()
+ // method.
// This API is experimental and can be changed without bumping the major version number.
- LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+ LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error)
+
+ // Tries to clean up remainders of previous containers or layers that are not
+ // referenced in the JSON files. These can happen in the case of unclean
+ // shutdowns or regular restarts in transient store mode.
+ GarbageCollect() error
+
+ // Check returns a report of things that look wrong in the store.
+ Check(options *CheckOptions) (CheckReport, error)
+ // Repair attempts to remediate problems mentioned in the CheckReport,
+ // usually by deleting layers and images which are damaged. If the
+ // right options are set, it will remove containers as well.
+ Repair(report CheckReport, options *RepairOptions) []error
+
+ // MultiList returns a MultiListResult structure that contains layer, image, or container
+ // extracts according to the values in MultiListOptions.
+ // MultiList returns consistent values as of a single point in time.
+ // WARNING: The values may already be out of date by the time they are returned to the caller.
+ MultiList(MultiListOptions) (MultiListResult, error)
}
-// AdditionalLayer reprents a layer that is contained in the additional layer store
+// AdditionalLayer represents a layer that is contained in the additional layer store
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayer interface {
// PutAs creates layer based on this handler, using diff contents from the additional
// layer store.
PutAs(id, parent string, names []string) (*Layer, error)
- // UncompressedDigest returns the uncompressed digest of this layer
- UncompressedDigest() digest.Digest
+ // TOCDigest returns the digest of TOC of this layer. Returns "" if unknown.
+ TOCDigest() digest.Digest
// CompressedSize returns the compressed size of this layer
CompressedSize() int64
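
Check and Repair are designed to pair up. A maintenance-pass sketch, assuming a package-level CheckEverything() constructor for CheckOptions (an assumption; adjust to whatever option builders the release actually exports):

```go
package example

import (
	"fmt"

	storage "github.com/containers/storage"
)

// fsck runs a consistency check and attempts repairs, logging per-item
// failures. CheckEverything() is assumed to enable all check variants.
func fsck(store storage.Store) error {
	report, err := store.Check(storage.CheckEverything())
	if err != nil {
		return err
	}
	// Repair returns one error per item it could not remediate.
	for _, e := range store.Repair(report, &storage.RepairOptions{}) {
		fmt.Println("repair:", e)
	}
	return nil
}
```
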
@@ -547,6 +623,37 @@ type LayerOptions struct {
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
+ // OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
+ // provided along with these LayerOptions, and reliably known by the caller.
+ // The digest might not be exactly the digest of the provided tarstream
+ // (e.g. the digest might be of a compressed representation, while providing
+ // an uncompressed one); in that case the caller is responsible for the two matching.
+ // Use the default "" if this field is not applicable or the value is not known.
+ OriginalDigest digest.Digest
+ // OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
+ // to OriginalDigest.
+ // If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
+ // not the tarstream.
+ // Use nil if not applicable or not known.
+ OriginalSize *int64
+ // UncompressedDigest specifies a digest of the uncompressed version ("DiffID")
+ // of the tarstream (diff), if one is provided along with these LayerOptions,
+ // and reliably known by the caller.
+ // Use the default "" if this field is not applicable or the value is not known.
+ UncompressedDigest digest.Digest
+ // True if the layer info can be treated as volatile.
+ Volatile bool
+ // BigData is a set of items which should be stored with the layer.
+ BigData []LayerBigDataOption
+ // Flags is a set of named flags and their values to store with the layer.
+ // Currently these can only be set when the layer record is created, but that
+ // could change in the future.
+ Flags map[string]interface{}
+}
+
+type LayerBigDataOption struct {
+ Key string
+ Data io.Reader
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
@@ -556,6 +663,26 @@ type ImageOptions struct {
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
+ // Digests is a list of digest values of the image's manifests, and
+ // possibly a manually-specified value, that we can use to locate the
+ // image. If Digest is set, its value is also in this list.
+ Digests []digest.Digest
+ // Metadata is caller-specified metadata associated with the image.
+ Metadata string
+ // BigData is a set of items which should be stored with the image.
+ BigData []ImageBigDataOption
+ // NamesHistory is used for guessing what this image was named when a container was created based
+ // on it, but it no longer has any names.
+ NamesHistory []string
+ // Flags is a set of named flags and their values to store with the image. Currently these can only
+ // be set when the image record is created, but that could change in the future.
+ Flags map[string]interface{}
+}
+
+type ImageBigDataOption struct {
+ Key string
+ Data []byte
+ Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
@@ -566,34 +693,78 @@ type ContainerOptions struct {
// or, if it is not being created based on an image, the Store object.
types.IDMappingOptions
LabelOpts []string
- Flags map[string]interface{}
- MountOpts []string
- Volatile bool
+ // Flags is a set of named flags and their values to store with the container.
+ // Currently these can only be set when the container record is created, but that
+ // could change in the future.
+ Flags map[string]interface{}
+ MountOpts []string
+ Volatile bool
+ StorageOpt map[string]string
+ // Metadata is caller-specified metadata associated with the container.
+ Metadata string
+ // BigData is a set of items which should be stored for the container.
+ BigData []ContainerBigDataOption
+}
+
+type ContainerBigDataOption struct {
+ Key string
+ Data []byte
}
type store struct {
- lastLoaded time.Time
- runRoot string
- graphLock Locker
- usernsLock Locker
+ // # Locking hierarchy:
+ // These locks do not all need to be held simultaneously, but if some code does need to lock more than one, it MUST do so in this order:
+ // - graphLock
+ // - layerStore.start{Reading,Writing}
+ // - roLayerStores[].startReading (in the order of the items of the roLayerStores array)
+ // - imageStore.start{Reading,Writing}
+ // - roImageStores[].startReading (in the order of the items of the roImageStores array)
+ // - containerStore.start{Reading,Writing}
+
+ // The following fields are only set when constructing store, and must never be modified afterwards.
+ // They are safe to access without any other locking.
+ runRoot string
+ graphDriverName string // Initially set to the user-requested value, possibly ""; updated during store construction, and does not change afterwards.
+ graphDriverPriority []string
+ // graphLock:
+ // - Ensures that we always reload graphDriver, and the primary layer store, after any process does store.Shutdown. This is necessary
+ // because (??) the Shutdown may forcibly unmount and clean up, affecting graph driver state in a way only a graph driver
+ // and layer store reinitialization can notice.
+ // - Ensures that store.Shutdown is exclusive with mount operations. This is necessary because some
+ // graph drivers call mount.MakePrivate() during initialization, the mount operations require that, and the driver’s Cleanup() method
+ // may undo that. So, holding graphLock is required throughout the duration of Shutdown(), and the duration of any mount
+ // (but not unmount) calls.
+ // - Within this store object, protects access to some related in-memory state.
+ graphLock *lockfile.LockFile
+ usernsLock *lockfile.LockFile
graphRoot string
- graphDriverName string
graphOptions []string
+ imageStoreDir string
+ pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
- additionalUIDs *idSet // Set by getAvailableIDs()
- additionalGIDs *idSet // Set by getAvailableIDs()
autoNsMinSize uint32
autoNsMaxSize uint32
- graphDriver drivers.Driver
- layerStore LayerStore
- roLayerStores []ROLayerStore
- imageStore ImageStore
- roImageStores []ROImageStore
- containerStore ContainerStore
+ imageStore rwImageStore
+ rwImageStores []rwImageStore
+ roImageStores []roImageStore
+ containerStore rwContainerStore
digestLockRoot string
disableVolatile bool
+ transientStore bool
+
+ // The following fields can only be accessed with graphLock held.
+ graphLockLastWrite lockfile.LastWrite
+ // FIXME: This field is only set when holding graphLock, but locking rules of the driver
+ // interface itself are not documented here. It is extensively used without holding graphLock.
+ graphDriver drivers.Driver
+ layerStoreUseGetters rwLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+ roLayerStoresUseGetters []roLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+
+ // FIXME: The following fields need locking, and don’t have it.
+ additionalUIDs *idSet // Set by getAvailableIDs()
+ additionalGIDs *idSet // Set by getAvailableIDs()
}
// GetStore attempts to find an already-created Store object matching the
@@ -603,19 +774,24 @@ type store struct {
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
-// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
-// * `STORAGE_OPTS` for the string of options to pass to the driver
+// - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+// - `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
-// if reexec.Init() {
-// return
-// }
+//
+// if reexec.Init() {
+// return
+// }
func GetStore(options types.StoreOptions) (Store, error) {
+ defaultOpts, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
- options = types.Options()
+ options = defaultOpts
}
if options.GraphRoot != "" {
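
The reexec requirement in the doc comment above, together with the new types.Options() default loader visible in this hunk, implies the following minimal caller; error handling is reduced to panics for brevity:

```go
package main

import (
	"fmt"

	storage "github.com/containers/storage"
	"github.com/containers/storage/pkg/reexec"
	"github.com/containers/storage/types"
)

func main() {
	// Must run first so re-executed children don't fall through into main().
	if reexec.Init() {
		return
	}
	opts, err := types.Options() // the same defaults GetStore consults above
	if err != nil {
		panic(err)
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("graph root:", store.GraphRoot())
}
```
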
@@ -636,37 +812,49 @@ func GetStore(options types.StoreOptions) (Store, error) {
storesLock.Lock()
defer storesLock.Unlock()
+ // Return only if BOTH run and graph root match; otherwise our run-root could be overridden if the graph root is matched first
for _, s := range stores {
- if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+ if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
- if options.GraphRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
- }
- if options.RunRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+ // If passed a run-root or graph-root alone, default the other; only error if we have neither.
+ switch {
+ case options.RunRoot == "" && options.GraphRoot == "":
+ return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions)
+ case options.GraphRoot == "":
+ options.GraphRoot = defaultOpts.GraphRoot
+ case options.RunRoot == "":
+ options.RunRoot = defaultOpts.RunRoot
}
- if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
+ if err := os.MkdirAll(options.RunRoot, 0o700); err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil {
return nil, err
}
- if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
+ if options.ImageStore != "" {
+ if err := os.MkdirAll(options.ImageStore, 0o700); err != nil {
+ return nil, err
+ }
+ }
+ if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil {
return nil, err
}
- for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
- if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil {
+ if options.ImageStore != "" {
+ if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil {
return nil, err
}
}
- graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
+ graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
- usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
+ usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock"))
if err != nil {
return nil, err
}
@@ -680,20 +868,25 @@ func GetStore(options types.StoreOptions) (Store, error) {
autoNsMaxSize = AutoUserNsMaxSize
}
s := &store{
- runRoot: options.RunRoot,
- graphLock: graphLock,
- graphRoot: options.GraphRoot,
- graphDriverName: options.GraphDriverName,
- graphOptions: options.GraphDriverOptions,
- uidMap: copyIDMap(options.UIDMap),
- gidMap: copyIDMap(options.GIDMap),
- autoUsernsUser: options.RootAutoNsUser,
- autoNsMinSize: autoNsMinSize,
- autoNsMaxSize: autoNsMaxSize,
- additionalUIDs: nil,
- additionalGIDs: nil,
- usernsLock: usernsLock,
- disableVolatile: options.DisableVolatile,
+ runRoot: options.RunRoot,
+ graphDriverName: options.GraphDriverName,
+ graphDriverPriority: options.GraphDriverPriority,
+ graphLock: graphLock,
+ usernsLock: usernsLock,
+ graphRoot: options.GraphRoot,
+ graphOptions: options.GraphDriverOptions,
+ imageStoreDir: options.ImageStore,
+ pullOptions: options.PullOptions,
+ uidMap: copySlicePreferringNil(options.UIDMap),
+ gidMap: copySlicePreferringNil(options.GIDMap),
+ autoUsernsUser: options.RootAutoNsUser,
+ autoNsMinSize: autoNsMinSize,
+ autoNsMaxSize: autoNsMaxSize,
+ disableVolatile: options.DisableVolatile,
+ transientStore: options.TransientStore,
+
+ additionalUIDs: nil,
+ additionalGIDs: nil,
}
if err := s.load(); err != nil {
return nil, err
@@ -704,30 +897,6 @@ func GetStore(options types.StoreOptions) (Store, error) {
return s, nil
}
-func copyUint32Slice(slice []uint32) []uint32 {
- m := []uint32{}
- if slice != nil {
- m = make([]uint32, len(slice))
- copy(m, slice)
- }
- if len(m) > 0 {
- return m[:]
- }
- return nil
-}
-
-func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
- m := []idtools.IDMap{}
- if idmap != nil {
- m = make([]idtools.IDMap, len(idmap))
- copy(m, idmap)
- }
- if len(m) > 0 {
- return m[:]
- }
- return nil
-}
-
func (s *store) RunRoot() string {
return s.runRoot
}
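
The removed copyUint32Slice and copyIDMap helpers are replaced by calls to a single generic copySlicePreferringNil, whose body is not shown in this diff. Given the name and the new slices import added earlier, it plausibly amounts to the following hedged reconstruction, not the vendored source:

```go
package storage

import "slices"

// Hedged reconstruction of the unexported helper now used for the UID/GID
// maps: keep empty inputs nil (hence "preferring nil"), otherwise return
// an independent copy so callers cannot mutate the store's fields.
func copySlicePreferringNil[T any](s []T) []T {
	if len(s) == 0 {
		return nil
	}
	return slices.Clone(s)
}
```
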
@@ -740,65 +909,118 @@ func (s *store) GraphRoot() string {
return s.graphRoot
}
+func (s *store) ImageStore() string {
+ return s.imageStoreDir
+}
+
+func (s *store) TransientStore() bool {
+ return s.transientStore
+}
+
func (s *store) GraphOptions() []string {
return s.graphOptions
}
+func (s *store) PullOptions() map[string]string {
+ cp := make(map[string]string, len(s.pullOptions))
+ maps.Copy(cp, s.pullOptions)
+ return cp
+}
+
func (s *store) UIDMap() []idtools.IDMap {
- return copyIDMap(s.uidMap)
+ return copySlicePreferringNil(s.uidMap)
}
func (s *store) GIDMap() []idtools.IDMap {
- return copyIDMap(s.gidMap)
+ return copySlicePreferringNil(s.gidMap)
}
+// This must only be called when constructing store; it writes to fields that are assumed to be constant after construction.
func (s *store) load() error {
- driver, err := s.GraphDriver()
- if err != nil {
+ var driver drivers.Driver
+ if err := func() error { // A scope for defer
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ lastWrite, err := s.graphLock.GetLastWrite()
+ if err != nil {
+ return err
+ }
+ s.graphLockLastWrite = lastWrite
+ driver, err = s.createGraphDriverLocked()
+ if err != nil {
+ return err
+ }
+ s.graphDriver = driver
+ s.graphDriverName = driver.String()
+ return nil
+ }(); err != nil {
return err
}
- s.graphDriver = driver
- s.graphDriverName = driver.String()
driverPrefix := s.graphDriverName + "-"
- gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
- if err := os.MkdirAll(gipath, 0700); err != nil {
- return err
+ imgStoreRoot := s.imageStoreDir
+ if imgStoreRoot == "" {
+ imgStoreRoot = s.graphRoot
}
- ris, err := newImageStore(gipath)
- if err != nil {
+ gipath := filepath.Join(imgStoreRoot, driverPrefix+"images")
+ if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
- s.imageStore = ris
- if _, err := s.ROImageStores(); err != nil {
+ imageStore, err := newImageStore(gipath)
+ if err != nil {
return err
}
+ s.imageStore = imageStore
+
+ s.rwImageStores = []rwImageStore{imageStore}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
- if err := os.MkdirAll(gcpath, 0700); err != nil {
+ if err := os.MkdirAll(gcpath, 0o700); err != nil {
return err
}
- rcs, err := newContainerStore(gcpath)
- if err != nil {
+ rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
return err
}
- rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
- if err := os.MkdirAll(rcpath, 0700); err != nil {
+
+ rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
+ if err != nil {
return err
}
+
s.containerStore = rcs
- for _, store := range driver.AdditionalImageStores() {
+ additionalImageStores := s.graphDriver.AdditionalImageStores()
+ if s.imageStoreDir != "" {
+ additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
+ }
+
+ for _, store := range additionalImageStores {
gipath := filepath.Join(store, driverPrefix+"images")
- ris, err := newROImageStore(gipath)
- if err != nil {
- return err
+ var ris roImageStore
+ // both the graphdriver and the imagestore must be used read-write.
+ if store == s.imageStoreDir || store == s.graphRoot {
+ imageStore, err := newImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.rwImageStores = append(s.rwImageStores, imageStore)
+ ris = imageStore
+ } else {
+ ris, err = newROImageStore(gipath)
+ if err != nil {
+ if errors.Is(err, syscall.EROFS) {
+ logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err)
+ continue
+ }
+ return err
+ }
}
s.roImageStores = append(s.roImageStores, ris)
}
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
- if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
+ if err := os.MkdirAll(s.digestLockRoot, 0o700); err != nil {
return err
}
@@ -807,135 +1029,401 @@ func (s *store) load() error {
// GetDigestLock returns a digest-specific Locker.
func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
- return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
+ return lockfile.GetLockFile(filepath.Join(s.digestLockRoot, d.String()))
}
-func (s *store) getGraphDriver() (drivers.Driver, error) {
- if s.graphDriver != nil {
- return s.graphDriver, nil
+// startUsingGraphDriver obtains s.graphLock and ensures that s.graphDriver is set and fresh.
+// It is only intended to be used on a fully-constructed store.
+// If this succeeds, the caller MUST call stopUsingGraphDriver().
+func (s *store) startUsingGraphDriver() error {
+ s.graphLock.Lock()
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ s.graphLock.Unlock()
+ }
+ }()
+
+ lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite)
+ if err != nil {
+ return err
}
+ if modified {
+ driver, err := s.createGraphDriverLocked()
+ if err != nil {
+ return err
+ }
+ // Our concurrency design requires s.graphDriverName not to be modified after
+ // store is constructed.
+ // It’s fine for driver.String() not to match the requested graph driver name
+ // (e.g. if the user asks for overlay2 and gets overlay), but it must be an idempotent
+ // mapping:
+ // driver1 := drivers.New(userInput, config)
+ // name1 := driver1.String()
+ // name2 := drivers.New(name1, config).String()
+ // assert(name1 == name2)
+ if s.graphDriverName != driver.String() {
+ return fmt.Errorf("graph driver name changed from %q to %q during reload",
+ s.graphDriverName, driver.String())
+ }
+ s.graphDriver = driver
+ s.layerStoreUseGetters = nil
+ s.graphLockLastWrite = lastWrite
+ }
+
+ succeeded = true
+ return nil
+}
+
+// stopUsingGraphDriver releases graphLock obtained by startUsingGraphDriver.
+func (s *store) stopUsingGraphDriver() {
+ s.graphLock.Unlock()
+}
+
+// createGraphDriverLocked creates a new instance of graph driver for s, and returns it.
+// Almost all users should use startUsingGraphDriver instead.
+// The caller must hold s.graphLock.
+func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
config := drivers.Options{
- Root: s.graphRoot,
- RunRoot: s.runRoot,
- DriverOptions: s.graphOptions,
- UIDMaps: s.uidMap,
- GIDMaps: s.gidMap,
+ Root: s.graphRoot,
+ ImageStore: s.imageStoreDir,
+ RunRoot: s.runRoot,
+ DriverPriority: s.graphDriverPriority,
+ DriverOptions: s.graphOptions,
}
- driver, err := drivers.New(s.graphDriverName, config)
- if err != nil {
- return nil, err
- }
- s.graphDriver = driver
- s.graphDriverName = driver.String()
- return driver, nil
+ return drivers.New(s.graphDriverName, config)
}
func (s *store) GraphDriver() (drivers.Driver, error) {
- s.graphLock.Lock()
- defer s.graphLock.Unlock()
- if s.graphLock.TouchedSince(s.lastLoaded) {
- s.graphDriver = nil
- s.layerStore = nil
- s.lastLoaded = time.Now()
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, err
}
- return s.getGraphDriver()
+ defer s.stopUsingGraphDriver()
+ return s.graphDriver, nil
}
-// LayerStore obtains and returns a handle to the writeable layer store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) LayerStore() (LayerStore, error) {
- s.graphLock.Lock()
- defer s.graphLock.Unlock()
- if s.graphLock.TouchedSince(s.lastLoaded) {
- s.graphDriver = nil
- s.layerStore = nil
- s.lastLoaded = time.Now()
- }
- if s.layerStore != nil {
- return s.layerStore, nil
- }
- driver, err := s.getGraphDriver()
- if err != nil {
- return nil, err
+// getLayerStoreLocked obtains and returns a handle to the writeable layer store object
+// used by the Store.
+// It must be called with s.graphLock held.
+func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
+ if s.layerStoreUseGetters != nil {
+ return s.layerStoreUseGetters, nil
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
- if err := os.MkdirAll(rlpath, 0700); err != nil {
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
- if err := os.MkdirAll(glpath, 0700); err != nil {
+ if err := os.MkdirAll(glpath, 0o700); err != nil {
return nil, err
}
- rls, err := s.newLayerStore(rlpath, glpath, driver)
+ ilpath := ""
+ if s.imageStoreDir != "" {
+ ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
+ }
+ rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
if err != nil {
return nil, err
}
- s.layerStore = rls
- return s.layerStore, nil
+ s.layerStoreUseGetters = rls
+ return s.layerStoreUseGetters, nil
}
-// ROLayerStores obtains additional read/only layer store objects used by the
-// Store. Accessing these stores directly will bypass locking and
-// synchronization, so it is not part of the exported Store interface.
-func (s *store) ROLayerStores() ([]ROLayerStore, error) {
- s.graphLock.Lock()
- defer s.graphLock.Unlock()
- if s.roLayerStores != nil {
- return s.roLayerStores, nil
- }
- driver, err := s.getGraphDriver()
- if err != nil {
+// getLayerStore obtains and returns a handle to the writeable layer store object
+// used by the store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) getLayerStore() (rwLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
return nil, err
}
+ defer s.stopUsingGraphDriver()
+ return s.getLayerStoreLocked()
+}
+
+// getROLayerStoresLocked obtains additional read/only layer store objects used by the
+// Store.
+// It must be called with s.graphLock held.
+func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
+ if s.roLayerStoresUseGetters != nil {
+ return s.roLayerStoresUseGetters, nil
+ }
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
- if err := os.MkdirAll(rlpath, 0700); err != nil {
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
- for _, store := range driver.AdditionalImageStores() {
+
+ for _, store := range s.graphDriver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
- rls, err := newROLayerStore(rlpath, glpath, driver)
+
+ rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
if err != nil {
return nil, err
}
- s.roLayerStores = append(s.roLayerStores, rls)
+ s.roLayerStoresUseGetters = append(s.roLayerStoresUseGetters, rls)
+ }
+ return s.roLayerStoresUseGetters, nil
+}
+
+// bothLayerStoreKindsLocked returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called with s.graphLock held.
+func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error) {
+ primary, err := s.getLayerStoreLocked()
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading primary layer store data: %w", err)
+ }
+ additional, err := s.getROLayerStoresLocked()
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading additional layer stores: %w", err)
+ }
+ return primary, additional, nil
+}
+
+// bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.bothLayerStoreKindsLocked()
+}
+
+// allLayerStoresLocked returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called with s.graphLock held.
+func (s *store) allLayerStoresLocked() ([]roLayerStore, error) {
+ primary, additional, err := s.bothLayerStoreKindsLocked()
+ if err != nil {
+ return nil, err
+ }
+ return append([]roLayerStore{primary}, additional...), nil
+}
+
+// allLayerStores returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) allLayerStores() ([]roLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.allLayerStoresLocked()
+}
+
+// readAllLayerStores processes allLayerStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any layer store fails, it immediately returns ({}, true, err).
+//
+// If all layer stores are processed without setting done == true, it returns ({}, false, nil).
+//
+// Typical usage:
+//
+// if res, done, err := readAllLayerStores(store, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ layerStores, err := s.allLayerStores()
+ if err != nil {
+ return zeroRes, true, err
+ }
+ for _, s := range layerStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
+ }
+ return zeroRes, false, nil
+}
+
+// writeToLayerStore is a helper for working with store.getLayerStore():
+// It locks the store for writing, checks for updates, and calls fn()
+// It returns the return value of fn, or its own error initializing the store.
+func writeToLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) {
+ var zeroRes T // A zero value of T
+
+ store, err := s.getLayerStore()
+ if err != nil {
+ return zeroRes, err
+ }
+
+ if err := store.startWriting(); err != nil {
+ return zeroRes, err
+ }
+ defer store.stopWriting()
+ return fn(store)
+}
+
+// readOrWriteAllLayerStores processes allLayerStores() in order:
+// It locks the writeable store for writing and all others for reading, checks
+// for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading or writing any layer store fails, it immediately returns ({}, true, err).
+//
+// If all layer stores are processed without setting done == true, it returns ({}, false, nil).
+//
+// Typical usage:
+//
+// if res, done, err := readOrWriteAllLayerStores(store, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readOrWriteAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ rwLayerStore, roLayerStores, err := s.bothLayerStoreKinds()
+ if err != nil {
+ return zeroRes, true, err
+ }
+
+ if err := rwLayerStore.startWriting(); err != nil {
+ return zeroRes, true, err
+ }
+ defer rwLayerStore.stopWriting()
+ if res, done, err := fn(rwLayerStore); done {
+ return res, true, err
+ }
+
+ for _, s := range roLayerStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
+ }
+ return zeroRes, false, nil
+}
+
+// allImageStores returns a list of all image store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+func (s *store) allImageStores() []roImageStore {
+ return append([]roImageStore{s.imageStore}, s.roImageStores...)
+}
+
+// readAllImageStores processes allImageStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any image store fails, it immediately returns ({}, true, err).
+//
+// If all image stores are processed without setting done == true, it returns ({}, false, nil).
+//
+// Typical usage:
+//
+// if res, done, err := readAllImageStores(s, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readAllImageStores[T any](s *store, fn func(store roImageStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ for _, s := range s.allImageStores() {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
}
- return s.roLayerStores, nil
+ return zeroRes, false, nil
}
-// ImageStore obtains and returns a handle to the writable image store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ImageStore() (ImageStore, error) {
- if s.imageStore != nil {
- return s.imageStore, nil
+// writeToImageStore is a convenience helper for working with store.imageStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore.
+// It returns the return value of fn, or its own error initializing the store.
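+//
+// Typical usage (an illustrative sketch; the callback body is elided and the
+// result type *Image is only an example):
+//
+// img, err := writeToImageStore(s, func() (*Image, error) {
+// … // safe to access s.imageStore here
+// })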
+func writeToImageStore[T any](s *store, fn func() (T, error)) (T, error) {
+ if err := s.imageStore.startWriting(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
}
- return nil, ErrLoadError
+ defer s.imageStore.stopWriting()
+ return fn()
}
-// ROImageStores obtains additional read/only image store objects used by the
-// Store. Accessing these stores directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ROImageStores() ([]ROImageStore, error) {
- if s.imageStore == nil {
- return nil, ErrLoadError
+// readContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for reading, checks for updates, and calls fn(), which can then access store.containerStore.
+// If reading the container store fails, it returns ({}, true, err).
+// Otherwise, it returns the return value of fn.
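+//
+// Typical usage (mirroring the call sites later in this file):
+//
+// if res, done, err := readContainerStore(s, func() (…) {
+// …
+// }); done {
+// return res, err
+// }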
+func readContainerStore[T any](s *store, fn func() (T, bool, error)) (T, bool, error) {
+ if err := s.containerStore.startReading(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, true, err
}
+ defer s.containerStore.stopReading()
+ return fn()
+}
- return s.roImageStores, nil
+// writeToContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore.
+// It returns the return value of fn, or its own error initializing the store.
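+//
+// Typical usage (an illustrative sketch; the callback body is elided and the
+// result type *Container is only an example):
+//
+// container, err := writeToContainerStore(s, func() (*Container, error) {
+// …
+// })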
+func writeToContainerStore[T any](s *store, fn func() (T, error)) (T, error) {
+ if err := s.containerStore.startWriting(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
+ }
+ defer s.containerStore.stopWriting()
+ return fn()
}
-// ContainerStore obtains and returns a handle to the container store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ContainerStore() (ContainerStore, error) {
- if s.containerStore != nil {
- return s.containerStore, nil
+// writeToAllStores is a convenience helper for writing to all three stores:
+// It locks the stores for writing, checks for updates, and calls fn(), which can then access the provided layer store,
+// s.imageStore and s.containerStore.
+// It returns the return value of fn, or its own error initializing the stores.
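+//
+// Typical usage (an illustrative sketch; the body is elided):
+//
+// err := s.writeToAllStores(func(rlstore rwLayerStore) error {
+// … // rlstore, s.imageStore and s.containerStore are all locked for writing
+// })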
+func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error {
+ rlstore, err := s.getLayerStore()
+ if err != nil {
+ return err
+ }
+
+ if err := rlstore.startWriting(); err != nil {
+ return err
+ }
+ defer rlstore.stopWriting()
+ if err := s.imageStore.startWriting(); err != nil {
+ return err
+ }
+ defer s.imageStore.stopWriting()
+ if err := s.containerStore.startWriting(); err != nil {
+ return err
}
- return nil, ErrLoadError
+ defer s.containerStore.stopWriting()
+
+ return fn(rlstore)
}
+// canUseShifting returns true if we can use mount-time arguments (shifting) to
+// avoid having to create a mapped top layer for a base image when we want to
+// use it to create a container using ID mappings.
+// On entry:
+// - rlstore must be locked for writing
func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
if !s.graphDriver.SupportsShifting() {
return false
@@ -949,35 +1437,16 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return true
}
-func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
+// On entry:
+// - rlstore must be locked for writing
+// - rlstores MUST NOT be locked
+func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
var parentLayer *Layer
- rlstore, err := s.LayerStore()
- if err != nil {
- return nil, -1, err
- }
- rlstores, err := s.ROLayerStores()
- if err != nil {
- return nil, -1, err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, -1, err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return nil, -1, err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, -1, err
- }
- if id == "" {
- id = stringid.GenerateRandomID()
- }
- if options == nil {
- options = &LayerOptions{}
+ var options LayerOptions
+ if lOptions != nil {
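+ // Copy the caller's options, deep-copying the reference-typed fields so
+ // that modifications made below do not leak back into lOptions.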
+ options = *lOptions
+ options.BigData = slices.Clone(lOptions.BigData)
+ options.Flags = copyMapPreferringNil(lOptions.Flags)
}
if options.HostUIDMapping {
options.UIDMap = nil
@@ -989,14 +1458,13 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer
- for _, l := range append([]ROLayerStore{rlstore}, rlstores...) {
+ for _, l := range append([]roLayerStore{rlstore}, rlstores...) {
lstore := l
if lstore != rlstore {
- lstore.RLock()
- defer lstore.Unlock()
- if err := lstore.ReloadIfChanged(); err != nil {
+ if err := lstore.startReading(); err != nil {
return nil, -1, err
}
+ defer lstore.stopReading()
}
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
@@ -1008,7 +1476,12 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, ErrLayerUnknown
}
parentLayer = ilayer
- containers, err := rcstore.Containers()
+
+ if err := s.containerStore.startWriting(); err != nil {
+ return nil, -1, err
+ }
+ defer s.containerStore.stopWriting()
+ containers, err := s.containerStore.Containers()
if err != nil {
return nil, -1, err
}
@@ -1024,6 +1497,13 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
gidMap = ilayer.GIDMap
}
} else {
+ // FIXME? It’s unclear why we are holding containerStore locked here at all
+ // (and because we are not modifying it, why it is a write lock, not a read lock).
+ if err := s.containerStore.startWriting(); err != nil {
+ return nil, -1, err
+ }
+ defer s.containerStore.stopWriting()
+
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
@@ -1031,20 +1511,29 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
gidMap = s.gidMap
}
}
- var layerOptions *LayerOptions
if s.canUseShifting(uidMap, gidMap) {
- layerOptions = &LayerOptions{IDMappingOptions: types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}}
+ options.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
} else {
- layerOptions = &LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: options.HostUIDMapping,
- HostGIDMapping: options.HostGIDMapping,
- UIDMap: copyIDMap(uidMap),
- GIDMap: copyIDMap(gidMap),
- },
+ options.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: options.HostUIDMapping,
+ HostGIDMapping: options.HostGIDMapping,
+ UIDMap: copySlicePreferringNil(uidMap),
+ GIDMap: copySlicePreferringNil(gidMap),
}
}
- return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff)
+ return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo)
+}
+
+func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
+ rlstore, rlstores, err := s.bothLayerStoreKinds()
+ if err != nil {
+ return nil, -1, err
+ }
+ if err := rlstore.startWriting(); err != nil {
+ return nil, -1, err
+ }
+ defer rlstore.stopWriting()
+ return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@@ -1052,33 +1541,19 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
return layer, err
}
-func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
- if id == "" {
- id = stringid.GenerateRandomID()
- }
-
+func (s *store) CreateImage(id string, names []string, layer, metadata string, iOptions *ImageOptions) (*Image, error) {
if layer != "" {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- lstores, err := s.ROLayerStores()
+ layerStores, err := s.allLayerStores()
if err != nil {
return nil, err
}
var ilayer *Layer
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range layerStores {
store := s
- if store == lstore {
- store.Lock()
- } else {
- store.RLock()
- }
- defer store.Unlock()
- err := store.ReloadIfChanged()
- if err != nil {
+ if err := store.startReading(); err != nil {
return nil, err
}
+ defer store.stopReading()
ilayer, err = store.Get(layer)
if err == nil {
break
@@ -1090,29 +1565,101 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
layer = ilayer.ID
}
- ristore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return nil, err
- }
-
- creationDate := time.Now().UTC()
- if options != nil && !options.CreationDate.IsZero() {
- creationDate = options.CreationDate
- }
-
- return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
-}
+ return writeToImageStore(s, func() (*Image, error) {
+ var options ImageOptions
+ var namesToAddAfterCreating []string
-func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options types.IDMappingOptions) (*Layer, error) {
- layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
- // If the driver supports shifting and the layer has no mappings, we can use it.
- if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
- return true
+ // Check if the ID refers to an image in a read-only store -- we want
+ // to allow images in read-only stores to have their names changed, so
+ // if we find one, merge the new values in with what we know about the
+ // image that's already there.
+ if id != "" {
+ for _, is := range s.roImageStores {
+ store := is
+ if err := store.startReading(); err != nil {
+ return nil, err
+ }
+ defer store.stopReading()
+ if i, err := store.Get(id); err == nil {
+ // set information about this image in "options"
+ options = ImageOptions{
+ Metadata: i.Metadata,
+ CreationDate: i.Created,
+ Digest: i.Digest,
+ Digests: copySlicePreferringNil(i.Digests),
+ NamesHistory: copySlicePreferringNil(i.NamesHistory),
+ }
+ for _, key := range i.BigDataNames {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ return nil, err
+ }
+ dataDigest, err := store.BigDataDigest(id, key)
+ if err != nil {
+ return nil, err
+ }
+ options.BigData = append(options.BigData, ImageBigDataOption{
+ Key: key,
+ Data: data,
+ Digest: dataDigest,
+ })
+ }
+ namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names))
+ break
+ }
+ }
+ }
+
+ // merge any passed-in options into "options" as best we can
+ if iOptions != nil {
+ if !iOptions.CreationDate.IsZero() {
+ options.CreationDate = iOptions.CreationDate
+ }
+ if iOptions.Digest != "" {
+ options.Digest = iOptions.Digest
+ }
+ options.Digests = append(options.Digests, iOptions.Digests...)
+ if iOptions.Metadata != "" {
+ options.Metadata = iOptions.Metadata
+ }
+ options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
+ options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ maps.Copy(options.Flags, iOptions.Flags)
+ }
+
+ if options.CreationDate.IsZero() {
+ options.CreationDate = time.Now().UTC()
+ }
+ if metadata != "" {
+ options.Metadata = metadata
+ }
+
+ res, err := s.imageStore.create(id, names, layer, options)
+ if err == nil && len(namesToAddAfterCreating) > 0 {
+ // set any names we pulled up from an additional image store, now that we won't be
+ // triggering a duplicate names error
+ err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames)
+ }
+ return res, err
+ })
+}
+
+// imageTopLayerForMapping locates the layer that can take the place of the
+// image's top layer as the shared parent layer for one or more containers
+// which are using ID mappings.
+// On entry:
+// - ristore must be locked EITHER for reading or writing
+// - s.imageStore must be locked for writing; it might be identical to ristore.
+// - rlstore must be locked for writing
+// - lstores must all be locked for reading
+func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
+ layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
+ // If the driver supports shifting and the layer has no mappings, we can use it.
+ if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
+ return true
}
// If we want host mapping, and the layer uses mappings, it's not the best match.
if options.HostUIDMapping && len(layer.UIDMap) != 0 {
@@ -1121,25 +1668,15 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
- // If we don't care about the mapping, it's fine.
- if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
- return true
- }
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
var layer, parentLayer *Layer
- allStores := append([]ROLayerStore{rlstore}, lstores...)
+ allStores := append([]roLayerStore{rlstore}, lstores...)
// Locate the image's top layer and its parent, if it has one.
+ createMappedLayer := ristore == s.imageStore
for _, s := range allStores {
store := s
- if store != rlstore {
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
- }
// Walk the top layer list.
for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
if cLayer, err := store.Get(candidate); err == nil {
@@ -1166,6 +1703,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if layer == nil {
layer = cLayer
parentLayer = cParentLayer
+ if store != rlstore {
+ // The layer is in another store, so we cannot
+ // create a mapped copy of it and register that copy with the image.
+ createMappedLayer = false
+ }
}
}
}
@@ -1180,49 +1722,50 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
return layer, nil
}
// The top layer's mappings don't match the ones we want, and it's in an image store
- // that lets us edit image metadata...
- if istore, ok := ristore.(*imageStore); ok {
- // ... so create a duplicate of the layer with the desired mappings, and
- // register it as an alternate top layer in the image.
- var layerOptions LayerOptions
- if s.canUseShifting(options.UIDMap, options.GIDMap) {
- layerOptions = LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- UIDMap: nil,
- GIDMap: nil,
- },
- }
- } else {
- layerOptions = LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: options.HostUIDMapping,
- HostGIDMapping: options.HostGIDMapping,
- UIDMap: copyIDMap(options.UIDMap),
- GIDMap: copyIDMap(options.GIDMap),
- },
- }
+ // that lets us edit image metadata, so create a duplicate of the layer with the desired
+ // mappings, and register it as an alternate top layer in the image.
+ var layerOptions LayerOptions
+ if s.canUseShifting(options.UIDMap, options.GIDMap) {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ UIDMap: nil,
+ GIDMap: nil,
}
- layerOptions.TemplateLayer = layer.ID
- mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
+ } else {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: options.HostUIDMapping,
+ HostGIDMapping: options.HostGIDMapping,
+ UIDMap: copySlicePreferringNil(options.UIDMap),
+ GIDMap: copySlicePreferringNil(options.GIDMap),
}
- if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
- if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
- err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
- }
- return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
+ }
+ layerOptions.TemplateLayer = layer.ID
+ mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
+ }
+ // By construction, createMappedLayer can only be true if ristore == s.imageStore.
+ if err = s.imageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
+ if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
+ err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
}
- layer = mappedLayer
+ return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
}
- return layer, nil
+ return mappedLayer, nil
}
-func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
- if options == nil {
- options = &ContainerOptions{}
+func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, cOptions *ContainerOptions) (*Container, error) {
+ var options ContainerOptions
+ if cOptions != nil {
+ options = *cOptions
+ options.IDMappingOptions.UIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.UIDMap)
+ options.IDMappingOptions.GIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.GIDMap)
+ options.LabelOpts = copySlicePreferringNil(cOptions.LabelOpts)
+ options.Flags = copyMapPreferringNil(cOptions.Flags)
+ options.MountOpts = copySlicePreferringNil(cOptions.MountOpts)
+ options.StorageOpt = copyMapPreferringNil(cOptions.StorageOpt)
+ options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
}
if options.HostUIDMapping {
options.UIDMap = nil
@@ -1230,13 +1773,11 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if options.HostGIDMapping {
options.GIDMap = nil
}
- rlstore, err := s.LayerStore()
+ options.Metadata = metadata
+ rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != ""
if err != nil {
return nil, err
}
- if id == "" {
- id = stringid.GenerateRandomID()
- }
var imageTopLayer *Layer
imageID := ""
@@ -1250,56 +1791,52 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
defer s.usernsLock.Unlock()
}
- var imageHomeStore ROImageStore
- var istore ImageStore
- var istores []ROImageStore
- var lstores []ROLayerStore
- var cimage *Image
+ var imageHomeStore roImageStore // Set if image != ""
+ // s.imageStore is locked read-write if image != ""
+ // s.roImageStores are NOT NECESSARILY ALL locked read-only if image != ""
+ var cimage *Image // Set if image != ""
if image != "" {
- var err error
- lstores, err = s.ROLayerStores()
- if err != nil {
- return nil, err
- }
- istore, err = s.ImageStore()
- if err != nil {
+ if err := rlstore.startWriting(); err != nil {
return nil, err
}
- istores, err = s.ROImageStores()
- if err != nil {
- return nil, err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
+ defer rlstore.stopWriting()
+ for _, s := range lstores {
store := s
- if store == istore {
- store.Lock()
- } else {
- store.RLock()
- }
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return nil, err
}
- cimage, err = store.Get(image)
- if err == nil {
- imageHomeStore = store
- break
+ defer store.stopReading()
+ }
+ if err := s.imageStore.startWriting(); err != nil {
+ return nil, err
+ }
+ defer s.imageStore.stopWriting()
+ cimage, err = s.imageStore.Get(image)
+ if err == nil {
+ imageHomeStore = s.imageStore
+ } else {
+ for _, s := range s.roImageStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return nil, err
+ }
+ defer store.stopReading()
+ cimage, err = store.Get(image)
+ if err == nil {
+ imageHomeStore = store
+ break
+ }
}
}
if cimage == nil {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown)
}
imageID = cimage.ID
}
if options.AutoUserNs {
var err error
- options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
+ options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage, rlstore, lstores)
if err != nil {
return nil, err
}
@@ -1311,8 +1848,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
idMappingsOptions := options.IDMappingOptions
if image != "" {
if cimage.TopLayer != "" {
- createMappedLayer := imageHomeStore == istore
- ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
+ ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idMappingsOptions)
if err != nil {
return nil, err
}
@@ -1326,11 +1862,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
} else {
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ if err := rlstore.startWriting(); err != nil {
return nil, err
}
+ defer rlstore.stopWriting()
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
@@ -1338,34 +1873,33 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
gidMap = s.gidMap
}
}
- var layerOptions *LayerOptions
+ layerOptions := &LayerOptions{
+ // Normally layers for containers are volatile only if the container is.
+ // But in transient store mode, all container layers are volatile.
+ Volatile: options.Volatile || s.transientStore,
+ }
if s.canUseShifting(uidMap, gidMap) {
- layerOptions = &LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- UIDMap: nil,
- GIDMap: nil,
- },
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ UIDMap: nil,
+ GIDMap: nil,
}
} else {
- layerOptions = &LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: idMappingsOptions.HostUIDMapping,
- HostGIDMapping: idMappingsOptions.HostGIDMapping,
- UIDMap: copyIDMap(uidMap),
- GIDMap: copyIDMap(gidMap),
- },
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: idMappingsOptions.HostUIDMapping,
+ HostGIDMapping: idMappingsOptions.HostGIDMapping,
+ UIDMap: copySlicePreferringNil(uidMap),
+ GIDMap: copySlicePreferringNil(gidMap),
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
- plabel, _ := options.Flags["ProcessLabel"].(string)
- mlabel, _ := options.Flags["MountLabel"].(string)
- if (plabel == "" && mlabel != "") ||
- (plabel != "" && mlabel == "") {
- return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified")
+ plabel, _ := options.Flags[processLabelFlag].(string)
+ mlabel, _ := options.Flags[mountLabelFlag].(string)
+ if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") {
+ return nil, errors.New("ProcessLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
@@ -1373,381 +1907,243 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- options.Flags["ProcessLabel"] = processLabel
- options.Flags["MountLabel"] = mountLabel
+ mlabel = mountLabel
+ options.Flags[processLabelFlag] = processLabel
+ options.Flags[mountLabelFlag] = mountLabel
}
- clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true)
+ clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
if err != nil {
return nil, err
}
layer = clayer.ID
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- options.IDMappingOptions = types.IDMappingOptions{
- HostUIDMapping: len(options.UIDMap) == 0,
- HostGIDMapping: len(options.GIDMap) == 0,
- UIDMap: copyIDMap(options.UIDMap),
- GIDMap: copyIDMap(options.GIDMap),
- }
- container, err := rcstore.Create(id, names, imageID, layer, metadata, options)
- if err != nil || container == nil {
- rlstore.Delete(layer)
- }
- return container, err
+
+ // Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile
+ if s.transientStore {
+ options.Volatile = true
+ }
+
+ return writeToContainerStore(s, func() (*Container, error) {
+ options.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: len(options.UIDMap) == 0,
+ HostGIDMapping: len(options.GIDMap) == 0,
+ UIDMap: copySlicePreferringNil(options.UIDMap),
+ GIDMap: copySlicePreferringNil(options.GIDMap),
+ }
+ container, err := s.containerStore.create(id, names, imageID, layer, &options)
+ if err != nil || container == nil {
+ if err2 := rlstore.Delete(layer); err2 != nil {
+ if err == nil {
+ err = fmt.Errorf("deleting layer %#v: %w", layer, err2)
+ } else {
+ logrus.Errorf("While recovering from a failure to create a container, error deleting layer %#v: %v", layer, err2)
+ }
+ }
+ }
+ return container, err
+ })
}
func (s *store) SetMetadata(id, metadata string) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
-
- if rlstore.Exists(id) {
- return rlstore.SetMetadata(id, metadata)
- }
- if ristore.Exists(id) {
- return ristore.SetMetadata(id, metadata)
- }
- if rcstore.Exists(id) {
- return rcstore.SetMetadata(id, metadata)
- }
- return ErrNotAnID
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if rlstore.Exists(id) {
+ return rlstore.SetMetadata(id, metadata)
+ }
+ if s.imageStore.Exists(id) {
+ return s.imageStore.SetMetadata(id, metadata)
+ }
+ if s.containerStore.Exists(id) {
+ return s.containerStore.SetMetadata(id, metadata)
+ }
+ return ErrNotAnID
+ })
}
func (s *store) Metadata(id string) (string, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return "", err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return "", err
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return "", err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
if store.Exists(id) {
- return store.Metadata(id)
+ res, err := store.Metadata(id)
+ return res, true, err
}
+ return "", false, nil
+ }); done {
+ return res, err
}
- istore, err := s.ImageStore()
- if err != nil {
- return "", err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return "", err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return "", err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if store.Exists(id) {
- return store.Metadata(id)
+ res, err := store.Metadata(id)
+ return res, true, err
}
+ return "", false, nil
+ }); done {
+ return res, err
}
- cstore, err := s.ContainerStore()
- if err != nil {
- return "", err
- }
- cstore.RLock()
- defer cstore.Unlock()
- if err := cstore.ReloadIfChanged(); err != nil {
- return "", err
- }
- if cstore.Exists(id) {
- return cstore.Metadata(id)
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if s.containerStore.Exists(id) {
+ res, err := s.containerStore.Metadata(id)
+ return res, true, err
+ }
+ return "", false, nil
+ }); done {
+ return res, err
}
+
return "", ErrNotAnID
}
func (s *store) ListImageBigData(id string) ([]string, error) {
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
bigDataNames, err := store.BigDataNames(id)
if err == nil {
- return bigDataNames, err
+ return bigDataNames, true, nil
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
- istore, err := s.ImageStore()
- if err != nil {
- return -1, err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return -1, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return -1, err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (int64, bool, error) {
size, err := store.BigDataSize(id, key)
if err == nil {
- return size, nil
+ return size, true, nil
}
+ return -1, false, nil
+ }); done {
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
}
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
- ristore, err := s.ImageStore()
- if err != nil {
- return "", err
- }
- stores, err := s.ROImageStores()
- if err != nil {
- return "", err
- }
- stores = append([]ROImageStore{ristore}, stores...)
- for _, r := range stores {
- ristore := r
- ristore.RLock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return "", err
- }
+ if res, done, err := readAllImageStores(s, func(ristore roImageStore) (digest.Digest, bool, error) {
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
- return d, nil
+ return d, true, nil
}
+ return "", false, nil
+ }); done {
+ return res, err
}
return "", ErrDigestUnknown
}
func (s *store) ImageBigData(id, key string) ([]byte, error) {
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
foundImage := false
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]byte, bool, error) {
data, err := store.BigData(id, key)
if err == nil {
- return data, nil
+ return data, true, nil
}
if store.Exists(id) {
foundImage = true
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
if foundImage {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id)
+ return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// ListLayerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a layer.
func (s *store) ListLayerBigData(id string) ([]string, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
- }
foundLayer := false
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
data, err := store.BigDataNames(id)
if err == nil {
- return data, nil
+ return data, true, nil
}
if store.Exists(id) {
foundLayer = true
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id)
+ return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// LayerBigData retrieves a (possibly large) chunk of named data
// associated with a layer.
func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
- }
foundLayer := false
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (io.ReadCloser, bool, error) {
data, err := store.BigData(id, key)
if err == nil {
- return data, nil
+ return data, true, nil
}
if store.Exists(id) {
foundLayer = true
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id)
+ return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// SetLayerBigData stores a (possibly large) chunk of named data
// associated with a layer.
func (s *store) SetLayerBigData(id, key string, data io.Reader) error {
- store, err := s.LayerStore()
- if err != nil {
- return err
- }
-
- store.Lock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return err
- }
- return store.SetBigData(id, key, data)
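+ // writeToLayerStore is generic over the callback's result type; callers that
+ // need no result besides the error use struct{} as a placeholder.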
+ _, err := writeToLayerStore(s, func(store rwLayerStore) (struct{}, error) {
+ return struct{}{}, store.SetBigData(id, key, data)
+ })
+ return err
}
func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
-
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
-
- return ristore.SetBigData(id, key, data, digestManifest)
+ _, err := writeToImageStore(s, func() (struct{}, error) {
+ return struct{}{}, s.imageStore.SetBigData(id, key, data, digestManifest)
+ })
+ return err
}
func (s *store) ImageSize(id string) (int64, error) {
- var image *Image
-
- lstore, err := s.LayerStore()
- if err != nil {
- return -1, errors.Wrapf(err, "error loading primary layer store data")
- }
- lstores, err := s.ROLayerStores()
+ layerStores, err := s.allLayerStores()
if err != nil {
- return -1, errors.Wrapf(err, "error loading additional layer stores")
+ return -1, err
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range layerStores {
store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return -1, err
}
- }
-
- var imageStore ROBigDataStore
- istore, err := s.ImageStore()
- if err != nil {
- return -1, errors.Wrapf(err, "error loading primary image store data")
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return -1, errors.Wrapf(err, "error loading additional image stores")
+ defer store.stopReading()
}
// Look for the image's record.
- for _, s := range append([]ROImageStore{istore}, istores...) {
+ var imageStore roBigDataStore
+ var image *Image
+ for _, s := range s.allImageStores() {
store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return -1, err
}
+ defer store.stopReading()
if image, err = store.Get(id); err == nil {
imageStore = store
break
}
}
if image == nil {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// Start with a list of the image's top layers, if it has any.
@@ -1768,24 +2164,24 @@ func (s *store) ImageSize(id string) (int64, error) {
}
visited[layerID] = struct{}{}
// Look for the layer and the store that knows about it.
- var layerStore ROLayerStore
+ var layerStore roLayerStore
var layer *Layer
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, store := range layerStores {
if layer, err = store.Get(layerID); err == nil {
layerStore = store
break
}
}
if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
- if layer.UncompressedDigest == "" {
+ if layer.UncompressedDigest == "" || n == -1 {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
- return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
+ return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err)
}
}
// Count this layer.
@@ -1800,12 +2196,12 @@ func (s *store) ImageSize(id string) (int64, error) {
// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
+ return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
+ return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err)
}
size += n
}
@@ -1814,21 +2210,16 @@ func (s *store) ImageSize(id string) (int64, error) {
}
func (s *store) ContainerSize(id string) (int64, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return -1, err
- }
- lstores, err := s.ROLayerStores()
+ layerStores, err := s.allLayerStores()
if err != nil {
return -1, err
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, s := range layerStores {
store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return -1, err
}
+ defer store.stopReading()
}
// Get the location of the container directory and container run directory.
@@ -1842,835 +2233,623 @@ func (s *store) ContainerSize(id string) (int64, error) {
return -1, err
}
- rcstore, err := s.ContainerStore()
- if err != nil {
- return -1, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return -1, err
- }
+ return writeToContainerStore(s, func() (int64, error) { // Yes, s.containerStore.BigDataSize requires a write lock.
+ // Read the container record.
+ container, err := s.containerStore.Get(id)
+ if err != nil {
+ return -1, err
+ }
- // Read the container record.
- container, err := rcstore.Get(id)
- if err != nil {
- return -1, err
- }
+ // Read the container's layer's size.
+ var layer *Layer
+ var size int64
+ for _, store := range layerStores {
+ if layer, err = store.Get(container.LayerID); err == nil {
+ size, err = store.DiffSize("", layer.ID)
+ if err != nil {
+ return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
+ }
+ break
+ }
+ }
+ if layer == nil {
+ return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
+ }
- // Read the container's layer's size.
- var layer *Layer
- var size int64
- for _, store := range append([]ROLayerStore{lstore}, lstores...) {
- if layer, err = store.Get(container.LayerID); err == nil {
- size, err = store.DiffSize("", layer.ID)
+ // Count big data items.
+ names, err := s.containerStore.BigDataNames(id)
+ if err != nil {
+ return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
+ }
+ for _, name := range names {
+ n, err := s.containerStore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
+ return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
}
- break
+ size += n
}
- }
- if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
- }
- // Count big data items.
- names, err := rcstore.BigDataNames(id)
- if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
- }
- for _, name := range names {
- n, err := rcstore.BigDataSize(id, name)
+ // Count the size of our container directory and container run directory.
+ n, err := directory.Size(cdir)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
+ return -1, err
+ }
+ size += n
+ n, err = directory.Size(rdir)
+ if err != nil {
+ return -1, err
}
size += n
- }
-
- // Count the size of our container directory and container run directory.
- n, err := directory.Size(cdir)
- if err != nil {
- return -1, err
- }
- size += n
- n, err = directory.Size(rdir)
- if err != nil {
- return -1, err
- }
- size += n
- return size, nil
+ return size, nil
+ })
}
func (s *store) ListContainerBigData(id string) ([]string, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
-
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
-
- return rcstore.BigDataNames(id)
-}
+ res, _, err := readContainerStore(s, func() ([]string, bool, error) {
+ res, err := s.containerStore.BigDataNames(id)
+ return res, true, err
+ })
+ return res, err
+}
func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return -1, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return -1, err
- }
- return rcstore.BigDataSize(id, key)
+ return writeToContainerStore(s, func() (int64, error) { // Yes, BigDataSize requires a write lock.
+ return s.containerStore.BigDataSize(id, key)
+ })
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return "", err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return "", err
- }
- return rcstore.BigDataDigest(id, key)
+ return writeToContainerStore(s, func() (digest.Digest, error) { // Yes, BigDataDigest requires a write lock.
+ return s.containerStore.BigDataDigest(id, key)
+ })
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- return rcstore.BigData(id, key)
+ res, _, err := readContainerStore(s, func() ([]byte, bool, error) {
+ res, err := s.containerStore.BigData(id, key)
+ return res, true, err
+ })
+ return res, err
}
func (s *store) SetContainerBigData(id, key string, data []byte) error {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
- return rcstore.SetBigData(id, key, data)
+ _, err := writeToContainerStore(s, func() (struct{}, error) {
+ return struct{}{}, s.containerStore.SetBigData(id, key, data)
+ })
+ return err
}
func (s *store) Exists(id string) bool {
- lstore, err := s.LayerStore()
- if err != nil {
- return false
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return false
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return false
- }
+ found, _, err := readAllLayerStores(s, func(store roLayerStore) (bool, bool, error) {
if store.Exists(id) {
- return true
+ return true, true, nil
}
- }
-
- istore, err := s.ImageStore()
+ return false, false, nil
+ })
if err != nil {
return false
}
- istores, err := s.ROImageStores()
- if err != nil {
- return false
+ if found {
+ return true
}
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return false
- }
+
+ found, _, err = readAllImageStores(s, func(store roImageStore) (bool, bool, error) {
if store.Exists(id) {
- return true
+ return true, true, nil
}
- }
-
- rcstore, err := s.ContainerStore()
+ return false, false, nil
+ })
if err != nil {
return false
}
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return false
- }
- if rcstore.Exists(id) {
+ if found {
return true
}
- return false
+ found, _, err = readContainerStore(s, func() (bool, bool, error) {
+ return s.containerStore.Exists(id), true, nil
+ })
+ if err != nil {
+ return false
+ }
+ return found
}
-func dedupeNames(names []string) []string {
- seen := make(map[string]bool)
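+// dedupeStrings returns names with duplicates dropped, preserving the order of
+// first occurrence; for example, {"a", "b", "a"} becomes {"a", "b"}.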
+func dedupeStrings(names []string) []string {
+ seen := make(map[string]struct{})
deduped := make([]string, 0, len(names))
for _, name := range names {
if _, wasSeen := seen[name]; !wasSeen {
- seen[name] = true
+ seen[name] = struct{}{}
deduped = append(deduped, name)
}
}
return deduped
}
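+// dedupeDigests is the digest.Digest counterpart of dedupeStrings.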
+func dedupeDigests(digests []digest.Digest) []digest.Digest {
+ seen := make(map[digest.Digest]struct{})
+ deduped := make([]digest.Digest, 0, len(digests))
+ for _, d := range digests {
+ if _, wasSeen := seen[d]; !wasSeen {
+ seen[d] = struct{}{}
+ deduped = append(deduped, d)
+ }
+ }
+ return deduped
+}
+
+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (s *store) SetNames(id string, names []string) error {
- deduped := dedupeNames(names)
+ return s.updateNames(id, names, setNames)
+}
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- if rlstore.Exists(id) {
- return rlstore.SetNames(id, deduped)
- }
+func (s *store) AddNames(id string, names []string) error {
+ return s.updateNames(id, names, addNames)
+}
- ristore, err := s.ImageStore()
- if err != nil {
+func (s *store) RemoveNames(id string, names []string) error {
+ return s.updateNames(id, names, removeNames)
+}
+
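+// updateNames applies op (setNames, addNames or removeNames) to the names of
+// the layer, image or container with the given ID: it checks the writeable
+// layer store first, then the image stores (pulling a read-only image up into
+// the writeable store if necessary), then the container store, and returns
+// ErrLayerUnknown if the ID is not found anywhere.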
+func (s *store) updateNames(id string, names []string, op updateNameOperation) error {
+ deduped := dedupeStrings(names)
+
+ if found, err := writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) {
+ if !rlstore.Exists(id) {
+ return false, nil
+ }
+ return true, rlstore.updateNames(id, deduped, op)
+ }); err != nil || found {
return err
}
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
+
+ if err := s.imageStore.startWriting(); err != nil {
return err
}
- if ristore.Exists(id) {
- return ristore.SetNames(id, deduped)
+ defer s.imageStore.stopWriting()
+ if s.imageStore.Exists(id) {
+ return s.imageStore.updateNames(id, deduped, op)
}
- // Check is id refers to a RO Store
- ristores, err := s.ROImageStores()
- if err != nil {
- return err
- }
- for _, s := range ristores {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ // Check if the id refers to a read-only image store -- we want to allow images in
+ // read-only stores to have their names changed.
+ for _, is := range s.roImageStores {
+ store := is
+ if err := store.startReading(); err != nil {
return err
}
+ defer store.stopReading()
if i, err := store.Get(id); err == nil {
- if len(deduped) > 1 {
- // Do not want to create image name in R/W storage
- deduped = deduped[1:]
+ // "pull up" the image so that we can change its names list
+ options := ImageOptions{
+ CreationDate: i.Created,
+ Digest: i.Digest,
+ Digests: copySlicePreferringNil(i.Digests),
+ Metadata: i.Metadata,
+ NamesHistory: copySlicePreferringNil(i.NamesHistory),
+ Flags: copyMapPreferringNil(i.Flags),
}
- _, err := ristore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest)
- if err == nil {
- return ristore.Save()
+ for _, key := range i.BigDataNames {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ return err
+ }
+ dataDigest, err := store.BigDataDigest(id, key)
+ if err != nil {
+ return err
+ }
+ options.BigData = append(options.BigData, ImageBigDataOption{
+ Key: key,
+ Data: data,
+ Digest: dataDigest,
+ })
}
- return err
+ _, err = s.imageStore.create(id, i.Names, i.TopLayer, options)
+ if err != nil {
+ return err
+ }
+ // now make the changes to the writeable image record's names list
+ return s.imageStore.updateNames(id, deduped, op)
}
}
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
+ if found, err := writeToContainerStore(s, func() (bool, error) {
+ if !s.containerStore.Exists(id) {
+ return false, nil
+ }
+ return true, s.containerStore.updateNames(id, deduped, op)
+ }); err != nil || found {
return err
}
- if rcstore.Exists(id) {
- return rcstore.SetNames(id, deduped)
- }
+
return ErrLayerUnknown
}
func (s *store) Names(id string) ([]string, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
if l, err := store.Get(id); l != nil && err == nil {
- return l.Names, nil
+ return l.Names, true, nil
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
if i, err := store.Get(id); i != nil && err == nil {
- return i.Names, nil
+ return i.Names, true, nil
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- if c, err := rcstore.Get(id); c != nil && err == nil {
- return c.Names, nil
+ if res, done, err := readContainerStore(s, func() ([]string, bool, error) {
+ if c, err := s.containerStore.Get(id); c != nil && err == nil {
+ return c.Names, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
}
+
return nil, ErrLayerUnknown
}
func (s *store) Lookup(name string) (string, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return "", err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return "", err
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return "", err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
if l, err := store.Get(name); l != nil && err == nil {
- return l.ID, nil
+ return l.ID, true, nil
}
+ return "", false, nil
+ }); done {
+ return res, err
}
- istore, err := s.ImageStore()
- if err != nil {
- return "", err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return "", err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return "", err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if i, err := store.Get(name); i != nil && err == nil {
- return i.ID, nil
+ return i.ID, true, nil
}
+ return "", false, nil
+ }); done {
+ return res, err
}
- cstore, err := s.ContainerStore()
- if err != nil {
- return "", err
- }
- cstore.RLock()
- defer cstore.Unlock()
- if err := cstore.ReloadIfChanged(); err != nil {
- return "", err
- }
- if c, err := cstore.Get(name); c != nil && err == nil {
- return c.ID, nil
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if c, err := s.containerStore.Get(name); c != nil && err == nil {
+ return c.ID, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
}
return "", ErrLayerUnknown
}
func (s *store) DeleteLayer(id string) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
-
- if rlstore.Exists(id) {
- if l, err := rlstore.Get(id); err != nil {
- id = l.ID
- }
- layers, err := rlstore.Layers()
- if err != nil {
- return err
- }
- for _, layer := range layers {
- if layer.Parent == id {
- return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID)
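+ // writeToAllStores (a helper elsewhere in this file) write-locks the primary
+ // layer, image, and container stores for the duration of the callback.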
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if rlstore.Exists(id) {
+ if l, err := rlstore.Get(id); err == nil { // resolve a name to its layer ID
+ id = l.ID
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return err
+ }
+ for _, layer := range layers {
+ if layer.Parent == id {
+ return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren)
+ }
+ }
+ images, err := s.imageStore.Images()
+ if err != nil {
+ return err
}
- }
- images, err := ristore.Images()
- if err != nil {
- return err
- }
- for _, image := range images {
- if image.TopLayer == id {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ for _, image := range images {
+ if image.TopLayer == id {
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
+ }
+ }
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return err
}
- if stringutils.InSlice(image.MappedTopLayers, id) {
- // No write access to the image store, fail before the layer is deleted
- if _, ok := ristore.(*imageStore); !ok {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ for _, container := range containers {
+ if container.LayerID == id {
+ return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer)
}
}
- }
- containers, err := rcstore.Containers()
- if err != nil {
- return err
- }
- for _, container := range containers {
- if container.LayerID == id {
- return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID)
+ if err := rlstore.Delete(id); err != nil {
+ return fmt.Errorf("delete layer %v: %w", id, err)
}
- }
- if err := rlstore.Delete(id); err != nil {
- return errors.Wrapf(err, "delete layer %v", id)
- }
- // The check here is used to avoid iterating the images if we don't need to.
- // There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers.
- if istore, ok := ristore.(*imageStore); ok {
for _, image := range images {
if stringutils.InSlice(image.MappedTopLayers, id) {
- if err = istore.removeMappedTopLayer(image.ID, id); err != nil {
- return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID)
+ if err = s.imageStore.removeMappedTopLayer(image.ID, id); err != nil {
+ return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
}
}
}
+ return nil
}
- return nil
- }
- return ErrNotALayer
+ return ErrNotALayer
+ })
}
func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
- rlstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
layersToRemove := []string{}
- if ristore.Exists(id) {
- image, err := ristore.Get(id)
- if err != nil {
- return nil, err
- }
- id = image.ID
- containers, err := rcstore.Containers()
- if err != nil {
- return nil, err
- }
- aContainerByImage := make(map[string]string)
- for _, container := range containers {
- aContainerByImage[container.ImageID] = container.ID
- }
- if container, ok := aContainerByImage[id]; ok {
- return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
- }
- images, err := ristore.Images()
- if err != nil {
- return nil, err
- }
- layers, err := rlstore.Layers()
- if err != nil {
- return nil, err
- }
- childrenByParent := make(map[string]*[]string)
- for _, layer := range layers {
- parent := layer.Parent
- if list, ok := childrenByParent[parent]; ok {
- newList := append(*list, layer.ID)
- childrenByParent[parent] = &newList
- } else {
- childrenByParent[parent] = &([]string{layer.ID})
- }
- }
- otherImagesByTopLayer := make(map[string]string)
- for _, img := range images {
- if img.ID != id {
- otherImagesByTopLayer[img.TopLayer] = img.ID
- for _, layerID := range img.MappedTopLayers {
- otherImagesByTopLayer[layerID] = img.ID
+ if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
+ // Delete the image from all of the read-write image stores that are configured to be used.
+ imageFound := false
+ for _, is := range s.rwImageStores {
+ if is != s.imageStore {
+ // This is an additional read-write image store, so we must
+ // take its write lock ourselves (the primary store is already
+ // locked by the surrounding writeToAllStores call).
+ if err := is.startWriting(); err != nil {
+ return err
}
+ defer is.stopWriting()
}
- }
- if commit {
- if err = ristore.Delete(id); err != nil {
- return nil, err
+ if !is.Exists(id) {
+ continue
}
- }
- layer := image.TopLayer
- lastRemoved := ""
- for layer != "" {
- if rcstore.Exists(layer) {
- break
+ imageFound = true
+ image, err := is.Get(id)
+ if err != nil {
+ return err
}
- if _, ok := otherImagesByTopLayer[layer]; ok {
- break
+ id = image.ID
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return err
+ }
+ aContainerByImage := make(map[string]string)
+ for _, container := range containers {
+ aContainerByImage[container.ImageID] = container.ID
+ }
+ if container, ok := aContainerByImage[id]; ok {
+ return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
+ }
+ images, err := is.Images()
+ if err != nil {
+ return err
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return err
+ }
+ childrenByParent := make(map[string][]string)
+ for _, layer := range layers {
+ childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID)
+ }
+ otherImagesTopLayers := make(map[string]struct{})
+ for _, img := range images {
+ if img.ID != id {
+ otherImagesTopLayers[img.TopLayer] = struct{}{}
+ for _, layerID := range img.MappedTopLayers {
+ otherImagesTopLayers[layerID] = struct{}{}
+ }
+ }
}
- parent := ""
- if l, err := rlstore.Get(layer); err == nil {
- parent = l.Parent
+ if commit {
+ if err = is.Delete(id); err != nil {
+ return err
+ }
}
- hasOtherRefs := func() bool {
- layersToCheck := []string{layer}
- if layer == image.TopLayer {
- layersToCheck = append(layersToCheck, image.MappedTopLayers...)
+ layer := image.TopLayer
+ layersToRemoveMap := make(map[string]struct{})
+ layersToRemove = append(layersToRemove, image.MappedTopLayers...)
+ for _, mappedTopLayer := range image.MappedTopLayers {
+ layersToRemoveMap[mappedTopLayer] = struct{}{}
+ }
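+ // Walk the chain from the image's top layer toward the root, stopping at the
+ // first layer that is still used by a container or by another image.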
+ for layer != "" {
+ if s.containerStore.Exists(layer) {
+ break
+ }
+ if _, used := otherImagesTopLayers[layer]; used {
+ break
+ }
+ parent := ""
+ if l, err := rlstore.Get(layer); err == nil {
+ parent = l.Parent
}
- for _, layer := range layersToCheck {
- if childList, ok := childrenByParent[layer]; ok && childList != nil {
- children := *childList
- for _, child := range children {
- if child != lastRemoved {
+ hasChildrenNotBeingRemoved := func() bool {
+ layersToCheck := []string{layer}
+ if layer == image.TopLayer {
+ layersToCheck = append(layersToCheck, image.MappedTopLayers...)
+ }
+ for _, layer := range layersToCheck {
+ if childList := childrenByParent[layer]; len(childList) > 0 {
+ for _, child := range childList {
+ if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval {
+ continue
+ }
return true
}
}
}
+ return false
}
- return false
- }
- if hasOtherRefs() {
- break
- }
- lastRemoved = layer
- if layer == image.TopLayer {
- layersToRemove = append(layersToRemove, image.MappedTopLayers...)
+ if hasChildrenNotBeingRemoved() {
+ break
+ }
+ layersToRemove = append(layersToRemove, layer)
+ layersToRemoveMap[layer] = struct{}{}
+ layer = parent
}
- layersToRemove = append(layersToRemove, lastRemoved)
- layer = parent
}
- } else {
- return nil, ErrNotAnImage
- }
- if commit {
- for _, layer := range layersToRemove {
- if err = rlstore.Delete(layer); err != nil {
- return nil, err
+ if !imageFound {
+ return ErrNotAnImage
+ }
+ if commit {
+ for _, layer := range layersToRemove {
+ if err = rlstore.Delete(layer); err != nil {
+ return err
+ }
}
}
+ return nil
+ }); err != nil {
+ return nil, err
}
return layersToRemove, nil
}
func (s *store) DeleteContainer(id string) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if !s.containerStore.Exists(id) {
+ return ErrNotAContainer
+ }
- if rcstore.Exists(id) {
- if container, err := rcstore.Get(id); err == nil {
- errChan := make(chan error)
- var wg sync.WaitGroup
+ container, err := s.containerStore.Get(id)
+ if err != nil {
+ return ErrNotAContainer
+ }
- if rlstore.Exists(container.LayerID) {
- wg.Add(1)
- go func() {
- errChan <- rlstore.Delete(container.LayerID)
- wg.Done()
- }()
+ // delete the layer first, separately, so that if we get an
+ // error while trying to do so, we don't go ahead and delete
+ // the container record that refers to it, effectively losing
+ // track of it
+ if rlstore.Exists(container.LayerID) {
+ if err := rlstore.Delete(container.LayerID); err != nil {
+ return err
}
- wg.Add(1)
- go func() {
- errChan <- rcstore.Delete(id)
- wg.Done()
- }()
+ }
- middleDir := s.graphDriverName + "-containers"
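+ // Remove the container's graph and run directories concurrently;
+ // multierror.Group collects any failures.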
+ var wg multierror.Group
+
+ middleDir := s.graphDriverName + "-containers"
+
+ wg.Go(func() error {
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
- wg.Add(1)
- go func() {
- errChan <- os.RemoveAll(gcpath)
- wg.Done()
- }()
+ return system.EnsureRemoveAll(gcpath)
+ })
+ wg.Go(func() error {
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
- wg.Add(1)
- go func() {
- errChan <- os.RemoveAll(rcpath)
- wg.Done()
- }()
-
- go func() {
- wg.Wait()
- close(errChan)
- }()
-
- var errors []error
- for {
- select {
- case err, ok := <-errChan:
- if !ok {
- return multierror.Append(nil, errors...).ErrorOrNil()
- }
- if err != nil {
- errors = append(errors, err)
- }
- }
- }
+ return system.EnsureRemoveAll(rcpath)
+ })
+
+ if multierr := wg.Wait(); multierr != nil {
+ return multierr.ErrorOrNil()
}
- }
- return ErrNotAContainer
+ return s.containerStore.Delete(id)
+ })
}
func (s *store) Delete(id string) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
-
- if rcstore.Exists(id) {
- if container, err := rcstore.Get(id); err == nil {
- if rlstore.Exists(container.LayerID) {
- if err = rlstore.Delete(container.LayerID); err != nil {
- return err
- }
- if err = rcstore.Delete(id); err != nil {
- return err
- }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata")
- if err = os.RemoveAll(gcpath); err != nil {
- return err
- }
- rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata")
- if err = os.RemoveAll(rcpath); err != nil {
- return err
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if s.containerStore.Exists(id) {
+ if container, err := s.containerStore.Get(id); err == nil {
+ if rlstore.Exists(container.LayerID) {
+ if err = rlstore.Delete(container.LayerID); err != nil {
+ return err
+ }
+ if err = s.containerStore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata")
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata")
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
}
- return nil
+ return ErrNotALayer
}
- return ErrNotALayer
}
- }
- if ristore.Exists(id) {
- return ristore.Delete(id)
- }
- if rlstore.Exists(id) {
- return rlstore.Delete(id)
- }
- return ErrLayerUnknown
+ if s.imageStore.Exists(id) {
+ return s.imageStore.Delete(id)
+ }
+ if rlstore.Exists(id) {
+ return rlstore.Delete(id)
+ }
+ return ErrLayerUnknown
+ })
}
func (s *store) Wipe() error {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return err
- }
- ristore, err := s.ImageStore()
- if err != nil {
- return err
- }
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return err
- }
- ristore.Lock()
- defer ristore.Unlock()
- if err := ristore.ReloadIfChanged(); err != nil {
- return err
- }
- rcstore.Lock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return err
- }
-
- if err = rcstore.Wipe(); err != nil {
- return err
- }
- if err = ristore.Wipe(); err != nil {
- return err
- }
- return rlstore.Wipe()
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if err := s.containerStore.Wipe(); err != nil {
+ return err
+ }
+ if err := s.imageStore.Wipe(); err != nil {
+ return err
+ }
+ return rlstore.Wipe()
+ })
}
func (s *store) Status() ([][2]string, error) {
- rlstore, err := s.LayerStore()
+ rlstore, err := s.getLayerStore()
if err != nil {
return nil, err
}
return rlstore.Status()
}
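+// storageVersion is the contents of the VERSION file, embedded at build time
+// (this relies on an "embed" import being added elsewhere in this change).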
+//go:embed VERSION
+var storageVersion string
+
func (s *store) Version() ([][2]string, error) {
+ if trimmedVersion := strings.TrimSpace(storageVersion); trimmedVersion != "" {
+ return [][2]string{{"Version", trimmedVersion}}, nil
+ }
return [][2]string{}, nil
}
func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
- rlstore, err := s.LayerStore()
- if err != nil {
- return "", err
- }
-
- s.graphLock.Lock()
- defer s.graphLock.Unlock()
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
+ // in startUsingGraphDriver().
+ if err := s.startUsingGraphDriver(); err != nil {
return "", err
}
+ defer s.stopUsingGraphDriver()
- modified, err := s.graphLock.Modified()
+ rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return "", err
}
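+ // When explicit ID mappings are requested, only allow ID shifting if
+ // canUseShifting() accepts these maps.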
+ if options.UidMaps != nil || options.GidMaps != nil {
+ options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
+ }
- /* We need to make sure the home mount is present when the Mount is done. */
- if modified {
- s.graphDriver = nil
- s.layerStore = nil
- s.graphDriver, err = s.getGraphDriver()
- if err != nil {
+ // tryMount is a closure so that the deferred rlstore.stopWriting() runs
+ // before we go on to check the read-only stores below.
+ tryMount := func() (string, error) {
+ if err := rlstore.startWriting(); err != nil {
return "", err
}
- s.lastLoaded = time.Now()
+ defer rlstore.stopWriting()
+ if rlstore.Exists(id) {
+ return rlstore.Mount(id, options)
+ }
+ return "", nil
+ }
+ mountPoint, err := tryMount()
+ if mountPoint != "" || err != nil {
+ return mountPoint, err
}
- if rlstore.Exists(id) {
- return rlstore.Mount(id, options)
+ // check if the layer is in a read-only store, and return a better error message
+ for _, store := range lstores {
+ if err := store.startReading(); err != nil {
+ return "", err
+ }
+ exists := store.Exists(id)
+ store.stopReading()
+ if exists {
+ return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
+ }
}
+
return "", ErrLayerUnknown
}
@@ -2704,11 +2883,12 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
options.GidMaps = container.GIDMap
options.Options = container.MountOpts()
if !s.disableVolatile {
- if v, found := container.Flags["Volatile"]; found {
- options.Volatile = v.(bool)
+ if v, found := container.Flags[volatileFlag]; found {
+ if b, ok := v.(bool); ok {
+ options.Volatile = b
+ }
}
}
- options.DisableShifting = !s.canUseShifting(container.UIDMap, container.GIDMap)
}
return s.mount(id, options)
}
@@ -2717,15 +2897,14 @@ func (s *store) Mounted(id string) (int, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
- rlstore, err := s.LayerStore()
+ rlstore, err := s.getLayerStore()
if err != nil {
return 0, err
}
- rlstore.RLock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ if err := rlstore.startReading(); err != nil {
return 0, err
}
+ defer rlstore.stopReading()
return rlstore.Mounted(id)
}
@@ -2742,80 +2921,112 @@ func (s *store) Unmount(id string, force bool) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
- rlstore, err := s.LayerStore()
- if err != nil {
- return false, err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return false, err
- }
- if rlstore.Exists(id) {
- return rlstore.Unmount(id, force)
- }
- return false, ErrLayerUnknown
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) {
+ if rlstore.Exists(id) {
+ return rlstore.unmount(id, force, false)
+ }
+ return false, ErrLayerUnknown
+ })
}
func (s *store) Changes(from, to string) ([]archive.Change, error) {
- lstore, err := s.LayerStore()
- if err != nil {
+ // NaiveDiff could cause mounts to happen without a lock, so be safe
+ // and treat the .Diff operation as a Mount.
+ // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
+ // in startUsingGraphDriver().
+ if err := s.startUsingGraphDriver(); err != nil {
return nil, err
}
- lstores, err := s.ROLayerStores()
+ defer s.stopUsingGraphDriver()
+
+ rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return nil, err
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+
+ // While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs)
+ // the overlay driver requires the primary layer store to be locked RW; see
+ // drivers/overlay.Driver.getMergedDir.
+ if err := rlstore.startWriting(); err != nil {
+ return nil, err
+ }
+ if rlstore.Exists(to) {
+ res, err := rlstore.Changes(from, to)
+ rlstore.stopWriting()
+ return res, err
+ }
+ rlstore.stopWriting()
+
+ for _, s := range lstores {
store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return nil, err
}
if store.Exists(to) {
- return store.Changes(from, to)
+ res, err := store.Changes(from, to)
+ store.stopReading()
+ return res, err
}
+ store.stopReading()
}
return nil, ErrLayerUnknown
}
func (s *store) DiffSize(from, to string) (int64, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return -1, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return -1, err
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return -1, err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
if store.Exists(to) {
- return store.DiffSize(from, to)
+ res, err := store.DiffSize(from, to)
+ return res, true, err
}
+ return -1, false, nil
+ }); done {
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
}
return -1, ErrLayerUnknown
}
func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
- lstore, err := s.LayerStore()
- if err != nil {
+ // NaiveDiff could cause mounts to happen without a lock, so be safe
+ // and treat the .Diff operation as a Mount.
+ // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
+ // in startUsingGraphDriver().
+ if err := s.startUsingGraphDriver(); err != nil {
return nil, err
}
- lstores, err := s.ROLayerStores()
+ defer s.stopUsingGraphDriver()
+
+ rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return nil, err
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+
+ // While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs)
+ // the overlay driver requires the primary layer store to be locked RW; see
+ // drivers/overlay.Driver.getMergedDir.
+ if err := rlstore.startWriting(); err != nil {
+ return nil, err
+ }
+ if rlstore.Exists(to) {
+ rc, err := rlstore.Diff(from, to, options)
+ if rc != nil && err == nil {
+ wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
+ err := rc.Close()
+ rlstore.stopWriting()
+ return err
+ })
+ return wrapped, nil
+ }
+ rlstore.stopWriting()
+ return rc, err
+ }
+ rlstore.stopWriting()
+
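+ // Fall back to the read-only layer stores; the wrapper below keeps the store
+ // read-locked until the caller closes the returned ReadCloser.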
+ for _, s := range lstores {
store := s
- store.RLock()
- if err := store.ReloadIfChanged(); err != nil {
+ if err := store.startReading(); err != nil {
return nil, err
}
if store.Exists(to) {
@@ -2823,130 +3034,104 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
if rc != nil && err == nil {
wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
err := rc.Close()
- store.Unlock()
+ store.stopReading()
return err
})
return wrapped, nil
}
- store.Unlock()
+ store.stopReading()
return rc, err
}
- store.Unlock()
+ store.stopReading()
}
return nil, ErrLayerUnknown
}
-func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if modified, err := rlstore.Modified(); modified || err != nil {
- if err = rlstore.Load(); err != nil {
- return err
- }
- }
- if !rlstore.Exists(to) {
- return ErrLayerUnknown
- }
- return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
-}
-
-func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
- rlstore, err := s.LayerStore()
- if err != nil {
- return err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if modified, err := rlstore.Modified(); modified || err != nil {
- if err = rlstore.Load(); err != nil {
- return err
- }
- }
- return rlstore.CleanupStagingDirectory(stagingDirectory)
-}
-
-func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
- rlstore, err := s.LayerStore()
+func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
+ rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil {
return nil, err
}
- rlstore.Lock()
- defer rlstore.Unlock()
- if modified, err := rlstore.Modified(); modified || err != nil {
- if err = rlstore.Load(); err != nil {
- return nil, err
- }
- }
- if to != "" && !rlstore.Exists(to) {
- return nil, ErrLayerUnknown
- }
- return rlstore.ApplyDiffWithDiffer(to, options, differ)
-}
-
-func (s *store) DifferTarget(id string) (string, error) {
- rlstore, err := s.LayerStore()
- if err != nil {
- return "", err
- }
- rlstore.Lock()
- defer rlstore.Unlock()
- if modified, err := rlstore.Modified(); modified || err != nil {
- if err = rlstore.Load(); err != nil {
- return "", err
- }
- }
- if rlstore.Exists(id) {
- return rlstore.DifferTarget(id)
+ if err := rlstore.startWriting(); err != nil {
+ return nil, err
}
- return "", ErrLayerUnknown
-}
-
-func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
- rlstore, err := s.LayerStore()
- if err != nil {
- return -1, err
+ defer rlstore.stopWriting()
+
+ layer, err := rlstore.Get(args.ID)
+ if err != nil && !errors.Is(err, ErrLayerUnknown) {
+ return layer, err
}
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return -1, err
+ if err == nil {
+ // This code path exists only for cmd/containers/storage.applyDiffUsingStagingDirectory; we have tests that
+ // assume layer creation and applying a staged layer are separate steps. Production pull code always uses the
+ // other path, where layer creation is atomic.
+ return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions)
}
- if rlstore.Exists(to) {
- return rlstore.ApplyDiff(to, diff)
+
+ // if the layer doesn't exist yet, try to create it.
+
+ slo := stagedLayerOptions{
+ DiffOutput: args.DiffOutput,
+ DiffOptions: args.DiffOptions,
}
- return -1, ErrLayerUnknown
+ layer, _, err = s.putLayer(rlstore, rlstores, args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
+ return layer, err
}
-func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
- var layers []Layer
- lstore, err := s.LayerStore()
+func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
+ _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+ return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
+ })
+ return err
+}
+
+func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+ rlstore, err := s.getLayerStore()
if err != nil {
return nil, err
}
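+ // Staging intentionally happens without the layer store write lock;
+ // ApplyStagedLayer takes that lock later, when the staged diff is applied.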
+ return rlstore.applyDiffWithDifferNoLock(options, differ)
+}
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
+func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+ if to != "" {
+ return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter")
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
+ return s.PrepareStagedLayer(options, differ)
+}
+
+func (s *store) DifferTarget(id string) (string, error) {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) {
+ if rlstore.Exists(id) {
+ return rlstore.DifferTarget(id)
+ }
+ return "", ErrLayerUnknown
+ })
+}
+
+func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (int64, error) {
+ if rlstore.Exists(to) {
+ return rlstore.ApplyDiff(to, diff)
}
+ return -1, ErrLayerUnknown
+ })
+}
+
+func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
+ var layers []Layer
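+ // struct{} is used because the helper needs a result type; the layers are
+ // accumulated in the captured slice instead.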
+ if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
storeLayers, err := m(store, d)
if err != nil {
- if errors.Cause(err) != ErrLayerUnknown {
- return nil, err
+ if !errors.Is(err, ErrLayerUnknown) {
+ return struct{}{}, true, err
}
- continue
+ return struct{}{}, false, nil
}
layers = append(layers, storeLayers...)
+ return struct{}{}, false, nil
+ }); err != nil {
+ return nil, err
}
if len(layers) == 0 {
return nil, ErrLayerUnknown
@@ -2956,51 +3141,50 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err)
}
- return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
+ return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err)
}
- return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
+ return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
-func (s *store) LayerSize(id string) (int64, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return -1, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return -1, err
+func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
+ if err := d.Validate(); err != nil {
+ return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err)
}
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return -1, err
- }
+ return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d)
+}
+
+func (s *store) LayerSize(id string) (int64, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
if store.Exists(id) {
- return store.Size(id)
+ res, err := store.Size(id)
+ return res, true, err
+ }
+ return -1, false, nil
+ }); done {
+ if err != nil {
+ return -1, err
}
+ return res, nil
}
return -1, ErrLayerUnknown
}
func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
- rlstore, err := s.LayerStore()
+ rlstore, err := s.getLayerStore()
if err != nil {
return nil, nil, err
}
- rlstore.RLock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ if err := rlstore.startReading(); err != nil {
return nil, nil, err
}
+ defer rlstore.stopReading()
if rlstore.Exists(id) {
return rlstore.ParentOwners(id)
}
@@ -3008,25 +3192,19 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
}
func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
- rlstore, err := s.LayerStore()
- if err != nil {
- return nil, nil, err
- }
- rcstore, err := s.ContainerStore()
+ rlstore, err := s.getLayerStore()
if err != nil {
return nil, nil, err
}
- rlstore.RLock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ if err := rlstore.startReading(); err != nil {
return nil, nil, err
}
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
+ defer rlstore.stopReading()
+ if err := s.containerStore.startReading(); err != nil {
return nil, nil, err
}
- container, err := rcstore.Get(id)
+ defer s.containerStore.stopReading()
+ container, err := s.containerStore.Get(id)
if err != nil {
return nil, nil, err
}
@@ -3037,112 +3215,74 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
}
func (s *store) Layers() ([]Layer, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- if err := lstore.LoadLocked(); err != nil {
- return nil, err
- }
- layers, err := lstore.Layers()
- if err != nil {
- return nil, err
- }
-
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
- }
-
- for _, s := range lstores {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ var layers []Layer
+ if _, done, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
storeLayers, err := store.Layers()
if err != nil {
- return nil, err
+ return struct{}{}, true, err
}
layers = append(layers, storeLayers...)
+ return struct{}{}, false, nil
+ }); done {
+ return nil, err
}
return layers, nil
}
func (s *store) Images() ([]Image, error) {
var images []Image
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
-
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
storeImages, err := store.Images()
if err != nil {
- return nil, err
+ return struct{}{}, true, err
}
images = append(images, storeImages...)
+ return struct{}{}, false, nil
+ }); err != nil {
+ return nil, err
}
return images, nil
}
func (s *store) Containers() ([]Container, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
-
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
-
- return rcstore.Containers()
+ res, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+ res, err := s.containerStore.Containers()
+ return res, true, err
+ })
+ return res, err
}
func (s *store) Layer(id string) (*Layer, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return nil, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROLayerStore{lstore}, lstores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (*Layer, bool, error) {
layer, err := store.Get(id)
if err == nil {
- return layer, nil
+ return layer, true, nil
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
return nil, ErrLayerUnknown
}
-func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) {
- adriver, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver)
- if !ok {
- return nil, ErrLayerUnknown
+func (s *store) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error) {
+ var adriver drivers.AdditionalLayerStoreDriver
+ if err := func() error { // A scope for defer
+ if err := s.startUsingGraphDriver(); err != nil {
+ return err
+ }
+ defer s.stopUsingGraphDriver()
+ a, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ adriver = a
+ return nil
+ }(); err != nil {
+ return nil, err
}
- al, err := adriver.LookupAdditionalLayer(d, imageref)
+ al, err := adriver.LookupAdditionalLayer(tocDigest, imageref)
if err != nil {
if errors.Is(err, drivers.ErrLayerUnknown) {
return nil, ErrLayerUnknown
@@ -3167,8 +3307,8 @@ type additionalLayer struct {
s *store
}
-func (al *additionalLayer) UncompressedDigest() digest.Digest {
- return al.layer.UncompressedDigest
+func (al *additionalLayer) TOCDigest() digest.Digest {
+ return al.layer.TOCDigest
}
func (al *additionalLayer) CompressedSize() int64 {
@@ -3176,29 +3316,23 @@ func (al *additionalLayer) CompressedSize() int64 {
}
func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) {
- rlstore, err := al.s.LayerStore()
+ rlstore, rlstores, err := al.s.bothLayerStoreKinds()
if err != nil {
return nil, err
}
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- rlstores, err := al.s.ROLayerStores()
- if err != nil {
+ if err := rlstore.startWriting(); err != nil {
return nil, err
}
+ defer rlstore.stopWriting()
var parentLayer *Layer
if parent != "" {
- for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
+ for _, lstore := range append([]roLayerStore{rlstore}, rlstores...) {
if lstore != rlstore {
- lstore.RLock()
- defer lstore.Unlock()
- if err := lstore.ReloadIfChanged(); err != nil {
+ if err := lstore.startReading(); err != nil {
return nil, err
}
+ defer lstore.stopReading()
}
parentLayer, err = lstore.Get(parent)
if err == nil {
@@ -3218,117 +3352,81 @@ func (al *additionalLayer) Release() {
}
func (s *store) Image(id string) (*Image, error) {
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (*Image, bool, error) {
image, err := store.Get(id)
if err == nil {
- return image, nil
+ if store != s.imageStore {
+ // found it in a read-only store - readAllImageStores() still has the writeable store locked for reading
+ if _, localErr := s.imageStore.Get(image.ID); localErr == nil {
+ // if the lookup key was a name, and we found the image in a read-only
+ // store, but we have an entry with the same ID in the read-write store,
+ // then the name was removed when we duplicated the image's
+ // record into writable storage, so we should ignore this entry
+ return nil, false, nil
+ }
+ }
+ return image, true, nil
}
+ return nil, false, nil
+ }); done {
+ return res, err
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
- images := []*Image{}
layer, err := s.Layer(id)
if err != nil {
return nil, err
}
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
-
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, s := range append([]ROImageStore{istore}, istores...) {
- store := s
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ images := []*Image{}
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
imageList, err := store.Images()
if err != nil {
- return nil, err
+ return struct{}{}, true, err
}
for _, image := range imageList {
+ image := image // take a copy of the loop variable: we append &image below
if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
images = append(images, &image)
}
}
+ return struct{}{}, false, nil
+ }); err != nil {
+ return nil, err
}
return images, nil
}
func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
images := []*Image{}
-
- istore, err := s.ImageStore()
- if err != nil {
- return nil, err
- }
-
- istores, err := s.ROImageStores()
- if err != nil {
- return nil, err
- }
- for _, store := range append([]ROImageStore{istore}, istores...) {
- store.RLock()
- defer store.Unlock()
- if err := store.ReloadIfChanged(); err != nil {
- return nil, err
- }
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
imageList, err := store.ByDigest(d)
- if err != nil && errors.Cause(err) != ErrImageUnknown {
- return nil, err
+ if err != nil && !errors.Is(err, ErrImageUnknown) {
+ return struct{}{}, true, err
}
images = append(images, imageList...)
+ return struct{}{}, false, nil
+ }); err != nil {
+ return nil, err
}
return images, nil
}
func (s *store) Container(id string) (*Container, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
-
- return rcstore.Get(id)
+ res, _, err := readContainerStore(s, func() (*Container, bool, error) {
+ res, err := s.containerStore.Get(id)
+ return res, true, err
+ })
+ return res, err
}
func (s *store) ContainerLayerID(id string) (string, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return "", err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return "", err
- }
- container, err := rcstore.Get(id)
+ container, _, err := readContainerStore(s, func() (*Container, bool, error) {
+ res, err := s.containerStore.Get(id)
+ return res, true, err
+ })
if err != nil {
return "", err
}
@@ -3340,16 +3438,10 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
if err != nil {
return nil, err
}
- rcstore, err := s.ContainerStore()
- if err != nil {
- return nil, err
- }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return nil, err
- }
- containerList, err := rcstore.Containers()
+ containerList, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+ res, err := s.containerStore.Containers()
+ return res, true, err
+ })
if err != nil {
return nil, err
}
@@ -3362,53 +3454,81 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
return nil, ErrContainerUnknown
}
-func (s *store) ContainerDirectory(id string) (string, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return "", err
+func (s *store) ImageDirectory(id string) (string, error) {
+ foundImage := false
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if store.Exists(id) {
+ foundImage = true
+ }
+ middleDir := s.graphDriverName + "-images"
+ gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gipath, 0o700); err != nil {
+ return "", true, err
+ }
+ return gipath, true, nil
+ }); done {
+ return res, err
}
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return "", err
+ if foundImage {
+ return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
}
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
- id, err = rcstore.Lookup(id)
- if err != nil {
- return "", err
- }
+func (s *store) ContainerDirectory(id string) (string, error) {
+ res, _, err := readContainerStore(s, func() (string, bool, error) {
+ id, err := s.containerStore.Lookup(id)
+ if err != nil {
+ return "", true, err
+ }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
- if err := os.MkdirAll(gcpath, 0700); err != nil {
- return "", err
- }
- return gcpath, nil
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return gcpath, true, nil
+ })
+ return res, err
}
-func (s *store) ContainerRunDirectory(id string) (string, error) {
- rcstore, err := s.ContainerStore()
- if err != nil {
- return "", err
- }
+func (s *store) ImageRunDirectory(id string) (string, error) {
+ foundImage := false
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if store.Exists(id) {
+ foundImage = true
+ }
- rcstore.RLock()
- defer rcstore.Unlock()
- if err := rcstore.ReloadIfChanged(); err != nil {
- return "", err
+ middleDir := s.graphDriverName + "-images"
+ rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return rcpath, true, nil
+ }); done {
+ return res, err
}
-
- id, err = rcstore.Lookup(id)
- if err != nil {
- return "", err
+ if foundImage {
+ return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
}
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
- middleDir := s.graphDriverName + "-containers"
- rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
- if err := os.MkdirAll(rcpath, 0700); err != nil {
- return "", err
- }
- return rcpath, nil
+func (s *store) ContainerRunDirectory(id string) (string, error) {
+ res, _, err := readContainerStore(s, func() (string, bool, error) {
+ id, err := s.containerStore.Lookup(id)
+ if err != nil {
+ return "", true, err
+ }
+
+ middleDir := s.graphDriverName + "-containers"
+ rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return rcpath, true, nil
+ })
+ return res, err
}
func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
@@ -3416,11 +3536,11 @@ func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
if err != nil {
return err
}
- err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700)
if err != nil {
return err
}
- return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600)
}
func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
@@ -3428,7 +3548,7 @@ func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
@@ -3436,11 +3556,11 @@ func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error
if err != nil {
return err
}
- err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700)
if err != nil {
return err
}
- return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600)
}
func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
@@ -3448,26 +3568,25 @@ func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) Shutdown(force bool) ([]string, error) {
mounted := []string{}
- modified := false
- rlstore, err := s.LayerStore()
- if err != nil {
+ if err := s.startUsingGraphDriver(); err != nil {
return mounted, err
}
+ defer s.stopUsingGraphDriver()
- s.graphLock.Lock()
- defer s.graphLock.Unlock()
-
- rlstore.Lock()
- defer rlstore.Unlock()
- if err := rlstore.ReloadIfChanged(); err != nil {
+ rlstore, err := s.getLayerStoreLocked()
+ if err != nil {
+ return mounted, err
+ }
+ if err := rlstore.startWriting(); err != nil {
return nil, err
}
+ defer rlstore.stopWriting()
layers, err := rlstore.Layers()
if err != nil {
@@ -3479,28 +3598,35 @@ func (s *store) Shutdown(force bool) ([]string, error) {
}
mounted = append(mounted, layer.ID)
if force {
- for layer.MountCount > 0 {
- _, err2 := rlstore.Unmount(layer.ID, force)
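+ // Unmount repeatedly until the layer reports ErrLayerNotMounted, rather than
+ // trusting a cached mount count.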
+ for {
+ _, err2 := rlstore.unmount(layer.ID, force, true)
+ if err2 == ErrLayerNotMounted {
+ break
+ }
if err2 != nil {
if err == nil {
err = err2
}
break
}
- modified = true
}
}
}
if len(mounted) > 0 && err == nil {
- err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
+ err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
}
if err == nil {
- err = s.graphDriver.Cleanup()
- s.graphLock.Touch()
- modified = true
- }
- if modified {
- rlstore.Touch()
+ // We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
+ // so that we reload after a .Shutdown() the same way other processes would.
+ // Shutdown() is basically an error path, so reliability is more important than performance.
+ if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
+ err = fmt.Errorf("graphLock.RecordWrite failed: %w", err2)
+ }
+ // Do the Cleanup() only after we are sure that the change was recorded with RecordWrite(), so that
+ // the next user picks it up.
+ if err == nil {
+ err = s.graphDriver.Cleanup()
+ }
}
return mounted, err
}
@@ -3524,56 +3650,57 @@ func makeBigDataBaseName(key string) string {
}
func stringSliceWithoutValue(slice []string, value string) []string {
- modified := make([]string, 0, len(slice))
- for _, v := range slice {
- if v == value {
- continue
- }
- modified = append(modified, v)
- }
- return modified
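+ // Clone before deleting: slices.DeleteFunc modifies the slice it is given.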
+ return slices.DeleteFunc(slices.Clone(slice), func(v string) bool {
+ return v == value
+ })
}
-func copyStringSlice(slice []string) []string {
- if len(slice) == 0 {
+// copySlicePreferringNil returns a copy of the slice.
+// If s is empty, nil is returned.
+func copySlicePreferringNil[S ~[]E, E any](s S) S {
+ if len(s) == 0 {
return nil
}
- ret := make([]string, len(slice))
- copy(ret, slice)
- return ret
+ return slices.Clone(s)
}
-func copyStringInt64Map(m map[string]int64) map[string]int64 {
- ret := make(map[string]int64, len(m))
- for k, v := range m {
- ret[k] = v
+// copyMapPreferringNil returns a shallow clone of map m.
+// If m is empty, nil is returned.
+//
+// (As of, e.g., Go 1.23, maps.Clone preserves nil, but that’s not a documented promise;
+// and this function turns even non-nil empty maps into nil.)
+func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V {
+ if len(m) == 0 {
+ return nil
}
- return ret
+ return maps.Clone(m)
}
-func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
- ret := make(map[string]digest.Digest, len(m))
+// newMapFrom returns a shallow clone of map m.
+// If m is empty, an empty map is allocated and returned.
+func newMapFrom[K comparable, V any](m map[K]V) map[K]V {
+ ret := make(map[K]V, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
-func copyDigestSlice(slice []digest.Digest) []digest.Digest {
- if len(slice) == 0 {
- return nil
+func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {
+ ret := make([]ImageBigDataOption, len(slice))
+ for i := range slice {
+ ret[i].Key = slice[i].Key
+ ret[i].Data = slices.Clone(slice[i].Data)
+ ret[i].Digest = slice[i].Digest
}
- ret := make([]digest.Digest, len(slice))
- copy(ret, slice)
return ret
}
-// copyStringInterfaceMap still forces us to assume that the interface{} is
-// a non-pointer scalar value
-func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
- ret := make(map[string]interface{}, len(m))
- for k, v := range m {
- ret[k] = v
+func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []ContainerBigDataOption {
+ ret := make([]ContainerBigDataOption, len(slice))
+ for i := range slice {
+ ret[i].Key = slice[i].Key
+ ret[i].Data = slices.Clone(slice[i].Data)
}
return ret
}
@@ -3588,25 +3715,30 @@ const AutoUserNsMaxSize = 65536
// creating a user namespace.
const RootAutoUserNsUser = "containers"
-// SetDefaultConfigFilePath sets the default configuration to the specified path
+// SetDefaultConfigFilePath sets the default configuration to the specified path, and loads the file.
+// Deprecated: Use types.SetDefaultConfigFilePath, which can return an error.
func SetDefaultConfigFilePath(path string) {
- types.SetDefaultConfigFilePath(path)
+ _ = types.SetDefaultConfigFilePath(path)
}
// DefaultConfigFile returns the path to the storage config file used
-func DefaultConfigFile(rootless bool) (string, error) {
- return types.DefaultConfigFile(rootless)
+func DefaultConfigFile() (string, error) {
+ return types.DefaultConfigFile()
}
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
+// Deprecated: Use types.ReloadConfigurationFile, which can return an error.
func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) {
- types.ReloadConfigurationFile(configFile, storeOptions)
+ _ = types.ReloadConfigurationFile(configFile, storeOptions)
}
// GetDefaultMountOptions returns the default mountoptions defined in container/storage
func GetDefaultMountOptions() ([]string, error) {
- defaultStoreOptions := types.Options()
+ defaultStoreOptions, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
}
@@ -3622,10 +3754,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
return nil, err
}
key = strings.ToLower(key)
- for _, m := range mountOpts {
- if m == key {
- return strings.Split(val, ","), nil
- }
+ if slices.Contains(mountOpts, key) {
+ return strings.Split(val, ","), nil
}
}
return nil, nil
@@ -3633,10 +3763,83 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
// Free removes the store from the list of stores
func (s *store) Free() {
- for i := 0; i < len(stores); i++ {
- if stores[i] == s {
- stores = append(stores[:i], stores[i+1:]...)
- return
+ if i := slices.Index(stores, s); i != -1 {
+ stores = slices.Delete(stores, i, i+1)
+ }
+}
+
+// GarbageCollect tries to clean up old, unreferenced container leftovers. It
+// returns the first error encountered, but continues as far as it can.
+func (s *store) GarbageCollect() error {
+ _, firstErr := writeToContainerStore(s, func() (struct{}, error) {
+ return struct{}{}, s.containerStore.GarbageCollect()
+ })
+
+ _, moreErr := writeToImageStore(s, func() (struct{}, error) {
+ return struct{}{}, s.imageStore.GarbageCollect()
+ })
+ if firstErr == nil {
+ firstErr = moreErr
+ }
+
+ _, moreErr = writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+ return struct{}{}, rlstore.GarbageCollect()
+ })
+ if firstErr == nil {
+ firstErr = moreErr
+ }
+
+ return firstErr
+}
+
+// MultiList returns a MultiListResult structure containing the layers, images,
+// and containers selected by the values in MultiListOptions.
+func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) {
+ // TODO: Possible optimization: Deduplicate content from multiple stores.
+ out := MultiListResult{}
+
+ if options.Layers {
+ layerStores, err := s.allLayerStores()
+ if err != nil {
+ return MultiListResult{}, err
+ }
+ for _, roStore := range layerStores {
+ if err := roStore.startReading(); err != nil {
+ return MultiListResult{}, err
+ }
+ defer roStore.stopReading()
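+ // Deferred until MultiList returns, so every store stays read-locked while
+ // the remaining stores are read.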
+ layers, err := roStore.Layers()
+ if err != nil {
+ return MultiListResult{}, err
+ }
+ out.Layers = append(out.Layers, layers...)
+ }
+ }
+
+ if options.Images {
+ for _, roStore := range s.allImageStores() {
+ if err := roStore.startReading(); err != nil {
+ return MultiListResult{}, err
+ }
+ defer roStore.stopReading()
+
+ images, err := roStore.Images()
+ if err != nil {
+ return MultiListResult{}, err
+ }
+ out.Images = append(out.Images, images...)
+ }
+ }
+
+ if options.Containers {
+ containers, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+ res, err := s.containerStore.Containers()
+ return res, true, err
+ })
+ if err != nil {
+ return MultiListResult{}, err
}
+ out.Containers = append(out.Containers, containers...)
}
+ return out, nil
}
diff --git a/vendor/github.com/containers/storage/types/default_override_test.conf b/vendor/github.com/containers/storage/types/default_override_test.conf
new file mode 100644
index 000000000..caa537ba9
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/default_override_test.conf
@@ -0,0 +1,11 @@
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Primary Read/Write location of container storage
+graphroot = "environment_override_graphroot"
+
+# Storage path for rootless users
+#
+rootless_storage_path = "environment_override_rootless_storage_path"
diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go
index d920d12eb..845b14eed 100644
--- a/vendor/github.com/containers/storage/types/errors.go
+++ b/vendor/github.com/containers/storage/types/errors.go
@@ -55,4 +55,45 @@ var (
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = errors.New("not supported")
+ // ErrInvalidMappings is returned when the specified mappings are invalid.
+ ErrInvalidMappings = errors.New("invalid mappings specified")
+ // ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace.
+ ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace")
+
+ // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver,
+ // but which is not known to or managed by the higher-level driver-agnostic logic.
+ ErrLayerUnaccounted = errors.New("layer in lower level storage driver not accounted for")
+ // ErrLayerUnreferenced describes a layer which is not used by any image or container.
+ ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers")
+ // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more
+ // files which were added in the layer appear to have changed. It may instead look like an
+ // unnamed "file integrity checksum failed" error.
+ ErrLayerIncorrectContentDigest = errors.New("layer content incorrect digest")
+ // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was
+ // used to populate the layer produced a diff of a different size. We check the digest
+ // first, so it's highly unlikely you'll ever see this error.
+ ErrLayerIncorrectContentSize = errors.New("layer content incorrect size")
+ // ErrLayerContentModified describes a layer which contains contents which should not be
+ // there, or for which ownership/permissions/dates have been changed.
+ ErrLayerContentModified = errors.New("layer content modified")
+ // ErrLayerDataMissing describes a layer which is missing a big data item.
+ ErrLayerDataMissing = errors.New("layer data item is missing")
+ // ErrLayerMissing describes a layer which is the missing parent of a layer.
+ ErrLayerMissing = errors.New("layer is missing")
+ // ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+ // about.
+ ErrImageLayerMissing = errors.New("image layer is missing")
+ // ErrImageDataMissing describes an image which is missing a big data item.
+ ErrImageDataMissing = errors.New("image data item is missing")
+ // ErrImageDataIncorrectSize describes an image which has a big data item which looks like
+ // its size has changed, likely because it's been modified somehow.
+ ErrImageDataIncorrectSize = errors.New("image data item has incorrect size")
+ // ErrContainerImageMissing describes a container which claims to be based on an image that
+ // we don't know about.
+ ErrContainerImageMissing = errors.New("image missing")
+ // ErrContainerDataMissing describes a container which is missing a big data item.
+ ErrContainerDataMissing = errors.New("container data item is missing")
+ // ErrContainerDataIncorrectSize describes a container which has a big data item which looks
+ // like its size has changed, likely because it's been modified somehow.
+ ErrContainerDataIncorrectSize = errors.New("container data item has incorrect size")
)
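
All of the names added above are sentinel errors, so callers are expected to match them with errors.Is rather than by comparing message text; that matching survives wrapping via fmt.Errorf's %w verb. A hedged sketch (the checkLayer function is invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for one of the sentinels in types/errors.go.
var ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers")

// checkLayer is a hypothetical consistency check that wraps the sentinel
// with extra context, the way callers of these errors typically would.
func checkLayer(id string) error {
	return fmt.Errorf("layer %s: %w", id, ErrLayerUnreferenced)
}

func main() {
	err := checkLayer("abc123")
	// errors.Is unwraps the %w chain, so the sentinel is still matchable.
	if errors.Is(err, ErrLayerUnreferenced) {
		fmt.Println("safe to garbage-collect:", err)
	}
}
```
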
diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go
index 82824ae2b..aabdf7a81 100644
--- a/vendor/github.com/containers/storage/types/idmappings.go
+++ b/vendor/github.com/containers/storage/types/idmappings.go
@@ -5,7 +5,6 @@ import (
"os"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
)
// AutoUserNsOptions defines how to automatically create a user namespace.
@@ -77,18 +76,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+ return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err)
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseUIDMap UID=%s: %w", UIDMapSlice, err)
}
parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseGIDMap GID=%s: %w", UIDMapSlice, err)
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
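
This hunk is part of the wider migration off github.com/pkg/errors: errors.Wrapf(err, format, args...) becomes fmt.Errorf(format+": %w", args..., err), which keeps the cause on the error chain for errors.Is and errors.As. A small demonstration of the equivalence:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent/subuid")

	// Old style (github.com/pkg/errors):
	//   return errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s", m)
	// New style (standard library), as used in this diff:
	wrapped := fmt.Errorf("failed to create NewIDMappings for uidmap=%s: %w", "containers", err)

	// The %w verb keeps the cause on the chain, so matching still works.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```
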
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index c0e3ea637..efc08c476 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -1,65 +1,157 @@
package types
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
- "os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
- "github.com/containers/storage/drivers/overlay"
cfg "github.com/containers/storage/pkg/config"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
// TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
Storage struct {
- Driver string `toml:"driver"`
- RunRoot string `toml:"runroot"`
- GraphRoot string `toml:"graphroot"`
- RootlessStoragePath string `toml:"rootless_storage_path"`
- Options cfg.OptionsConfig `toml:"options"`
+ Driver string `toml:"driver,omitempty"`
+ DriverPriority []string `toml:"driver_priority,omitempty"`
+ RunRoot string `toml:"runroot,omitempty"`
+ ImageStore string `toml:"imagestore,omitempty"`
+ GraphRoot string `toml:"graphroot,omitempty"`
+ RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
+ TransientStore bool `toml:"transient_store,omitempty"`
+ Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"`
}
-// defaultConfigFile path to the system wide storage.conf file
+const (
+ overlayDriver = "overlay"
+ overlay2 = "overlay2"
+ storageConfEnv = "CONTAINERS_STORAGE_CONF"
+)
+
var (
- defaultConfigFile = "/etc/containers/storage.conf"
- defaultConfigFileSet = false
+ defaultStoreOptionsOnce sync.Once
+ loadDefaultStoreOptionsErr error
+ once sync.Once
+ storeOptions StoreOptions
+ storeError error
+ defaultConfigFileSet bool
+ // defaultConfigFile is the path to the system-wide storage.conf file
+ defaultConfigFile = SystemConfigFile
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)
-func init() {
- defaultStoreOptions.RunRoot = "/run/containers/storage"
- defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+func loadDefaultStoreOptions() {
defaultStoreOptions.GraphDriverName = ""
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ setDefaults := func() {
+ // reload could reset run root and graph root to empty values if the config does not contain anything
+ if defaultStoreOptions.RunRoot == "" {
+ defaultStoreOptions.RunRoot = defaultRunRoot
+ }
+ if defaultStoreOptions.GraphRoot == "" {
+ defaultStoreOptions.GraphRoot = defaultGraphRoot
+ }
+ }
+ setDefaults()
+
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
+ defaultOverrideConfigFile = path
+ if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+ return
+ }
+
+ if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok {
+ homeConfigFile := filepath.Join(path, "containers", "storage.conf")
+ if err := fileutils.Exists(homeConfigFile); err == nil {
+ // use the user's storage.conf under XDG_CONFIG_HOME if it exists
+ defaultOverrideConfigFile = homeConfigFile
+ } else {
+ if !os.IsNotExist(err) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ }
+ }
+
+ err := fileutils.Exists(defaultOverrideConfigFile)
+ if err == nil {
+ // The DefaultConfigFile() function reports the path of the
+ // storage.conf file in use by returning defaultConfigFile.
+ // If the override file exists, containers/storage uses it by default.
+ defaultConfigFile = defaultOverrideConfigFile
+ if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+ return
+ }
+
+ if !os.IsNotExist(err) {
+ logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
+ }
+ if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+}
+
+// loadStoreOptions returns the default storage options for containers
+func loadStoreOptions() (StoreOptions, error) {
+ storageConf, err := DefaultConfigFile()
+ if err != nil {
+ return defaultStoreOptions, err
+ }
+ return loadStoreOptionsFromConfFile(storageConf)
+}
+
+// usePerUserStorage returns whether per-user private storage must be used.
+// We cannot simply use the unshare.IsRootless() condition, because
+// that checks only if the current process needs a user namespace to
+// work and it would break cases where the process is already created
+// in a user namespace (e.g. nested Podman/Buildah) and the desired
+// behavior is to use system paths instead of user private paths.
+func usePerUserStorage() bool {
+ return unshare.IsRootless() && unshare.GetRootlessUID() != 0
}
-// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
-// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.
-func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {
+// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing.
+// Everyone but the tests this is intended for should only call loadStoreOptions, never this function.
+func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) {
var (
defaultRootlessRunRoot string
defaultRootlessGraphRoot string
err error
)
+
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ if loadDefaultStoreOptionsErr != nil {
+ return StoreOptions{}, loadDefaultStoreOptionsErr
+ }
storageOpts := defaultStoreOptions
- if rootless && rootlessUID != 0 {
- storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
+ if usePerUserStorage() {
+ storageOpts, err = getRootlessStorageOpts(storageOpts)
if err != nil {
return storageOpts, err
}
}
- _, err = os.Stat(storageConf)
+ err = fileutils.Exists(storageConf)
if err != nil && !os.IsNotExist(err) {
return storageOpts, err
}
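
loadDefaultStoreOptions gives CONTAINERS_STORAGE_CONF the highest priority, then a storage.conf under XDG_CONFIG_HOME, then the override file, then the system default. A standalone approximation of that lookup order (the hard-coded paths are the Linux defaults from this diff and are illustrative only):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveConfigFile mirrors the lookup order in loadDefaultStoreOptions:
// the environment variable wins, then the per-user file, then the
// system override, then the packaged default.
func resolveConfigFile() string {
	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
		return path
	}
	if xdg, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok {
		home := filepath.Join(xdg, "containers", "storage.conf")
		if _, err := os.Stat(home); err == nil {
			return home
		}
	}
	if _, err := os.Stat("/etc/containers/storage.conf"); err == nil {
		return "/etc/containers/storage.conf"
	}
	return "/usr/share/containers/storage.conf"
}

func main() {
	fmt.Println("using:", resolveConfigFile())
}
```
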
@@ -68,7 +160,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = StoreOptions{}
reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
- if rootless && rootlessUID != 0 {
+ if usePerUserStorage() {
// If the file did not specify a graphroot or runroot,
// set sane defaults so we don't try and use root-owned
// directories
@@ -84,20 +176,25 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
}
}
}
- if storageOpts.RunRoot != "" {
- runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
- if err != nil {
- return storageOpts, err
- }
- storageOpts.RunRoot = runRoot
+ if storageOpts.RunRoot == "" {
+ return storageOpts, fmt.Errorf("runroot must be set")
}
- if storageOpts.GraphRoot != "" {
- graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)
- if err != nil {
- return storageOpts, err
- }
- storageOpts.GraphRoot = graphRoot
+ rootlessUID := unshare.GetRootlessUID()
+ runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
+ if err != nil {
+ return storageOpts, err
+ }
+ storageOpts.RunRoot = runRoot
+
+ if storageOpts.GraphRoot == "" {
+ return storageOpts, fmt.Errorf("graphroot must be set")
}
+ graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)
+ if err != nil {
+ return storageOpts, err
+ }
+ storageOpts.GraphRoot = graphRoot
+
if storageOpts.RootlessStoragePath != "" {
storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID)
if err != nil {
@@ -106,16 +203,26 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
storageOpts.RootlessStoragePath = storagePath
}
+ if storageOpts.ImageStore != "" && storageOpts.ImageStore == storageOpts.GraphRoot {
+ return storageOpts, fmt.Errorf("imagestore %s must either be not set or be a different than graphroot", storageOpts.ImageStore)
+ }
+
return storageOpts, nil
}
+// UpdateStoreOptions should be called only if the container engine has
+// received a SIGHUP; otherwise use DefaultStoreOptions.
+func UpdateStoreOptions() (StoreOptions, error) {
+ storeOptions, storeError = loadStoreOptions()
+ return storeOptions, storeError
+}
+
// DefaultStoreOptions returns the default storage ops for containers
-func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
- storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
- if err != nil {
- return defaultStoreOptions, err
- }
- return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
+func DefaultStoreOptions() (StoreOptions, error) {
+ once.Do(func() {
+ storeOptions, storeError = loadStoreOptions()
+ })
+ return storeOptions, storeError
}
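
DefaultStoreOptions is now memoized behind a sync.Once, with UpdateStoreOptions as the explicit reload path after a SIGHUP. A compact sketch of that once-plus-refresh pattern, using invented names independent of the storage types:

```go
package main

import (
	"fmt"
	"sync"
)

type options struct{ GraphRoot string }

var (
	once sync.Once
	opts options
)

func load() options {
	fmt.Println("loading config...") // runs on first use or explicit refresh
	return options{GraphRoot: "/var/lib/containers/storage"}
}

// Default returns the cached options, loading them on first use only.
func Default() options {
	once.Do(func() { opts = load() })
	return opts
}

// Update forces a reload, e.g. after a SIGHUP, and refreshes the cache.
func Update() options {
	opts = load()
	return opts
}

func main() {
	fmt.Println(Default()) // triggers load
	fmt.Println(Default()) // served from cache
	fmt.Println(Update())  // explicit reload
}
```
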
// StoreOptions is used for passing initialization options to GetStore(), for
@@ -128,13 +235,22 @@ type StoreOptions struct {
// GraphRoot is the filesystem path under which we will store the
// contents of layers, images, and containers.
GraphRoot string `json:"root,omitempty"`
+ // ImageStore is the alternate location of the image store, used when a
+ // location separate from the container store is required.
+ ImageStore string `json:"imagestore,omitempty"`
// RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"`
- // GraphDriverName is the underlying storage driver that we'll be
- // using. It only needs to be specified the first time a Store is
- // initialized for a given RunRoot and GraphRoot.
+ // If the driver is not specified, the best suited driver will be picked
+ // either from GraphDriverPriority, if specified, or from the platform
+ // dependent priority list (in that order).
GraphDriverName string `json:"driver,omitempty"`
+ // GraphDriverPriority is a list of storage drivers that will be tried
+ // to initialize the Store for a given RunRoot and GraphRoot unless a
+ // GraphDriverName is set.
+ // This list can be used to define a custom order in which the drivers
+ // will be tried.
+ GraphDriverPriority []string `json:"driver-priority,omitempty"`
// GraphDriverOptions are driver-specific options.
GraphDriverOptions []string `json:"driver-options,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root filesystem
@@ -153,6 +269,8 @@ type StoreOptions struct {
PullOptions map[string]string `toml:"pull_options"`
// DisableVolatile doesn't allow volatile mounts when it is set.
DisableVolatile bool `json:"disable-volatile,omitempty"`
+ // If transient, don't persist containers over boot (stores db in runroot)
+ TransientStore bool `json:"transient_store,omitempty"`
}
// isRootlessDriver returns true if the given storage driver is valid for containers running as non root
@@ -167,15 +285,27 @@ func isRootlessDriver(driver string) bool {
}
// getRootlessStorageOpts returns the storage opts for containers running as non root
-func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
+func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
var opts StoreOptions
- const overlayDriver = "overlay"
- dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
+ rootlessUID := unshare.GetRootlessUID()
+
+ dataDir, err := homedir.GetDataHome()
if err != nil {
return opts, err
}
- opts.RunRoot = rootlessRuntime
+
+ rootlessRuntime, err := homedir.GetRuntimeDir()
+ if err != nil {
+ return opts, err
+ }
+
+ opts.RunRoot = filepath.Join(rootlessRuntime, "containers")
+ if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil {
+ return opts, fmt.Errorf("unable to make rootless runtime: %w", err)
+ }
+
+ opts.PullOptions = systemOpts.PullOptions
if systemOpts.RootlessStoragePath != "" {
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
if err != nil {
@@ -191,31 +321,47 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
if driver := os.Getenv("STORAGE_DRIVER"); driver != "" {
opts.GraphDriverName = driver
}
- if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
- supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
- if err != nil {
- return opts, err
- }
- if supported {
- opts.GraphDriverName = overlayDriver
- } else {
- if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
- opts.GraphDriverName = overlayDriver
- opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
+ if opts.GraphDriverName == overlay2 {
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
+ opts.GraphDriverName = overlayDriver
+ }
+
+ // If the configuration file was explicitly set, then copy all the options
+ // present.
+ if defaultConfigFileSet {
+ opts.GraphDriverOptions = systemOpts.GraphDriverOptions
+ opts.ImageStore = systemOpts.ImageStore
+ } else if opts.GraphDriverName == overlayDriver {
+ for _, o := range systemOpts.GraphDriverOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+ break
}
}
- if opts.GraphDriverName == overlayDriver {
- for _, o := range systemOpts.GraphDriverOptions {
- if strings.Contains(o, "ignore_chown_errors") {
- opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
- break
+ }
+ if opts.GraphDriverName == "" {
+ if len(systemOpts.GraphDriverPriority) == 0 {
+ dirEntries, err := os.ReadDir(opts.GraphRoot)
+ if err == nil {
+ for _, entry := range dirEntries {
+ if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok {
+ opts.GraphDriverName = name
+ break
+ }
+ }
+ }
+
+ if opts.GraphDriverName == "" {
+ if canUseRootlessOverlay() {
+ opts.GraphDriverName = overlayDriver
+ } else {
+ opts.GraphDriverName = "vfs"
}
}
+ } else {
+ opts.GraphDriverPriority = systemOpts.GraphDriverPriority
}
}
- if opts.GraphDriverName == "" {
- opts.GraphDriverName = "vfs"
- }
if os.Getenv("STORAGE_OPTS") != "" {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
@@ -224,12 +370,6 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
return opts, nil
}
-// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
-func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
- uid := getRootlessUID()
- return DefaultStoreOptions(uid != 0, uid)
-}
-
var prevReloadConfig = struct {
storeOptions *StoreOptions
mod time.Time
@@ -238,56 +378,57 @@ var prevReloadConfig = struct {
}{}
// SetDefaultConfigFilePath sets the default configuration to the specified path
-func SetDefaultConfigFilePath(path string) {
+func SetDefaultConfigFilePath(path string) error {
defaultConfigFile = path
defaultConfigFileSet = true
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
-func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
- if !os.IsNotExist(err) {
- fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- }
- return
+ return err
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
- return
+ return nil
}
- ReloadConfigurationFile(configFile, storeOptions)
+ if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+ return err
+ }
- prevReloadConfig.storeOptions = storeOptions
+ cOptions := *storeOptions
+ prevReloadConfig.storeOptions = &cOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
+ return nil
}
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
- data, err := ioutil.ReadFile(configFile)
- if err != nil {
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error {
+ config := new(TomlConfig)
+
+ meta, err := toml.DecodeFile(configFile, &config)
+ if err == nil {
+ keys := meta.Undecoded()
+ if len(keys) > 0 {
+ logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile)
+ }
+ } else {
if !os.IsNotExist(err) {
- fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- return
+ logrus.Warningf("Failed to read %s %v\n", configFile, err.Error())
+ return err
}
}
- config := new(tomlConfig)
-
- if _, err := toml.Decode(string(data), config); err != nil {
- fmt.Printf("Failed to parse %s %v\n", configFile, err.Error())
- return
- }
-
- // Clear storeOptions of previos settings
+ // Clear storeOptions of previous settings
*storeOptions = StoreOptions{}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
@@ -296,8 +437,13 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
storeOptions.GraphDriverName = config.Storage.Driver
}
- if storeOptions.GraphDriverName == "" {
- logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile)
+ if storeOptions.GraphDriverName == overlay2 {
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
+ storeOptions.GraphDriverName = overlayDriver
+ }
+ storeOptions.GraphDriverPriority = config.Storage.DriverPriority
+ if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 {
+ logrus.Warnf("The storage 'driver' option should be set in %s. A driver was picked automatically.", configFile)
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
@@ -305,6 +451,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
+ if config.Storage.ImageStore != "" {
+ storeOptions.ImageStore = config.Storage.ImageStore
+ }
if config.Storage.RootlessStoragePath != "" {
storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
}
@@ -332,34 +481,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
- if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
- config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
- }
- if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
- config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
- }
- if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
- mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
- if err != nil {
- fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
- return
- }
- storeOptions.UIDMap = mappings.UIDs()
- storeOptions.GIDMap = mappings.GIDs()
- }
-
- uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
- if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.UIDMap = uidmap
- }
- gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
- if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.GIDMap = gidmap
- }
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
@@ -372,6 +493,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
}
storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile
+ storeOptions.TransientStore = config.Storage.TransientStore
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)
@@ -381,8 +503,46 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
+ return nil
}
-func Options() StoreOptions {
- return defaultStoreOptions
+func Options() (StoreOptions, error) {
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ return defaultStoreOptions, loadDefaultStoreOptionsErr
+}
+
+// Save overwrites the configuration in storage.conf with the given TomlConfig
+func Save(conf TomlConfig) error {
+ configFile, err := DefaultConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil {
+ return err
+ }
+
+ f, err := os.Create(configFile)
+ if err != nil {
+ return err
+ }
+
+ return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig retrieves the storage.conf TOML so that it can be modified and written back
+func StorageConfig() (*TomlConfig, error) {
+ config := new(TomlConfig)
+
+ configFile, err := DefaultConfigFile()
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = toml.DecodeFile(configFile, &config)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
}
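
ReloadConfigurationFile now decodes with toml.DecodeFile and uses the returned MetaData to warn about keys that did not map to any struct field. A stripped-down sketch of that pattern with BurntSushi/toml (the config struct here is a small stand-in, not the real TomlConfig):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Storage struct {
		Driver    string `toml:"driver"`
		GraphRoot string `toml:"graphroot"`
	} `toml:"storage"`
}

func main() {
	var cfg config
	meta, err := toml.DecodeFile("/etc/containers/storage.conf", &cfg)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Undecoded lists keys present in the file but absent from the struct,
	// the same mechanism the diff uses to warn about misspelled options.
	if keys := meta.Undecoded(); len(keys) > 0 {
		fmt.Printf("unknown keys %q\n", keys)
	}
	fmt.Println("driver:", cfg.Storage.Driver)
}
```
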
diff --git a/vendor/github.com/containers/storage/types/options_bsd.go b/vendor/github.com/containers/storage/types/options_bsd.go
new file mode 100644
index 000000000..040fdc797
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_bsd.go
@@ -0,0 +1,21 @@
+//go:build freebsd || netbsd
+
+package types
+
+const (
+ // these are the default paths for the run root and graph root of rootful users;
+ // for rootless users the paths are constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/var/run/containers/storage"
+ defaultGraphRoot string = "/var/db/containers/storage"
+ SystemConfigFile = "/usr/local/share/containers/storage.conf"
+)
+
+// defaultOverrideConfigFile is the path to the system-wide override storage.conf file
+var (
+ defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf"
+)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go
new file mode 100644
index 000000000..27ba6a061
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_darwin.go
@@ -0,0 +1,16 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root of rootful users;
+ // for rootless users the paths are constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+ SystemConfigFile = "/usr/share/containers/storage.conf"
+)
+
+var defaultOverrideConfigFile = "/etc/containers/storage.conf"
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go
new file mode 100644
index 000000000..09cbae54b
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_linux.go
@@ -0,0 +1,52 @@
+package types
+
+import (
+ "os/exec"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // these are the default paths for the run root and graph root of rootful users;
+ // for rootless users the paths are constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+ SystemConfigFile = "/usr/share/containers/storage.conf"
+)
+
+// defaultOverrideConfigFile is the path to the system-wide override storage.conf file
+var (
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay() bool {
+ // we check first for fuse-overlayfs since it is cheaper.
+ if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
+ return true
+ }
+
+ // We cannot use overlay.SupportsNativeOverlay since canUseRootlessOverlay is called by Podman
+ // before we enter the user namespace, and the driver we pick here is written to the Podman database.
+ // Checking the kernel version is usually not a good idea since the feature could be back-ported (e.g. on RHEL),
+ // but this is just a heuristic, and on RHEL we always install the storage.conf file anyway.
+ // Native overlay for rootless was added upstream in 5.13 (at least the first version that we support),
+ // so check that the kernel is >= 5.13.
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err == nil {
+ parts := strings.Split(string(uts.Release[:]), ".")
+ major, _ := strconv.Atoi(parts[0])
+ if major >= 6 {
+ return true
+ }
+ if major == 5 && len(parts) > 1 {
+ minor, _ := strconv.Atoi(parts[1])
+ if minor >= 13 {
+ return true
+ }
+ }
+ }
+ return false
+}
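
The kernel check above is worth reading closely: strconv.Atoi fails on a minor component with a suffix and yields 0, which errs toward "too old", the safe direction for this heuristic. A standalone version of the same comparison (the sample release strings are made up):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// kernelAtLeast513 applies the same major/minor comparison the diff uses.
func kernelAtLeast513(release string) bool {
	parts := strings.Split(release, ".")
	major, _ := strconv.Atoi(parts[0])
	if major >= 6 {
		return true
	}
	if major == 5 && len(parts) > 1 {
		// Atoi fails on suffixes like "13-rc7" and returns 0; for such
		// kernels that errs on the side of "too old", which is safe here.
		minor, _ := strconv.Atoi(parts[1])
		return minor >= 13
	}
	return false
}

func main() {
	for _, r := range []string{"5.12.0", "5.13.0-200.fc34", "6.1.0", "5.13-rc7"} {
		fmt.Println(r, "=>", kernelAtLeast513(r))
	}
}
```
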
diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go
new file mode 100644
index 000000000..99a67ff21
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_windows.go
@@ -0,0 +1,19 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root of rootful users;
+ // for rootless users the paths are constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+ SystemConfigFile = "/usr/share/containers/storage.conf"
+)
+
+// defaultOverrideConfigFile is the path to the system-wide override storage.conf file
+var (
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/storage_broken.conf b/vendor/github.com/containers/storage/types/storage_broken.conf
new file mode 100644
index 000000000..3bca1d978
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_broken.conf
@@ -0,0 +1,18 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+foo = "bar"
+
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "/run/containers/test"
+
+[storage.options]
+# Primary Read/Write location of container storage
+graphroot = "/var/lib/containers/storage"
+
diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf
index 9b682fe15..761b3a795 100644
--- a/vendor/github.com/containers/storage/types/storage_test.conf
+++ b/vendor/github.com/containers/storage/types/storage_test.conf
@@ -29,7 +29,3 @@ additionalimagestores = [
# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"
-
-
-[storage.options.thinpool]
-# Storage Options for thinpool
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index d2dca7b68..73fcd2405 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -1,166 +1,40 @@
package types
import (
- "fmt"
- "io/ioutil"
+ "errors"
"os"
"path/filepath"
"strconv"
"strings"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
- "github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// GetRootlessRuntimeDir returns the runtime directory when running as non root
-func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
- path, err := getRootlessRuntimeDir(rootlessUID)
+func expandEnvPath(path string, rootlessUID int) (string, error) {
+ var err error
+ path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
+ path = os.ExpandEnv(path)
+ newpath, err := filepath.EvalSymlinks(path)
if err != nil {
- return "", err
+ newpath = filepath.Clean(path)
}
- path = filepath.Join(path, "containers")
- if err := os.MkdirAll(path, 0700); err != nil {
- return "", errors.Wrapf(err, "unable to make rootless runtime")
- }
- return path, nil
-}
-
-type rootlessRuntimeDirEnvironment interface {
- getProcCommandFile() string
- getRunUserDir() string
- getTmpPerUserDir() string
-
- homeDirGetRuntimeDir() (string, error)
- systemLstat(string) (*system.StatT, error)
- homedirGet() string
-}
-
-type rootlessRuntimeDirEnvironmentImplementation struct {
- procCommandFile string
- runUserDir string
- tmpPerUserDir string
-}
-
-func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {
- return env.procCommandFile
-}
-func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {
- return env.runUserDir
-}
-func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {
- return env.tmpPerUserDir
-}
-func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {
- return homedir.GetRuntimeDir()
-}
-func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {
- return system.Lstat(path)
-}
-func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {
- return homedir.Get()
-}
-
-func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {
- st, err := env.systemLstat(dir)
- return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000
+ return newpath, nil
}
-// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.
-// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.
-func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {
- runtimeDir, err := env.homeDirGetRuntimeDir()
- if err == nil {
- return runtimeDir, nil
+func DefaultConfigFile() (string, error) {
+ if defaultConfigFileSet {
+ return defaultConfigFile, nil
}
- initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
- if err != nil || string(initCommand) == "systemd" {
- runUserDir := env.getRunUserDir()
- if isRootlessRuntimeDirOwner(runUserDir, env) {
- return runUserDir, nil
- }
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
+ return path, nil
}
-
- tmpPerUserDir := env.getTmpPerUserDir()
- if tmpPerUserDir != "" {
- if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
- if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
- logrus.Errorf("failed to create temp directory for user: %v", err)
- } else {
- return tmpPerUserDir, nil
- }
- } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {
- return tmpPerUserDir, nil
+ if !usePerUserStorage() {
+ if err := fileutils.Exists(defaultOverrideConfigFile); err == nil {
+ return defaultOverrideConfigFile, nil
}
- }
-
- homeDir := env.homedirGet()
- if homeDir == "" {
- return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty")
- }
- resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
- if err != nil {
- return "", err
- }
- return filepath.Join(resolvedHomeDir, "rundir"), nil
-}
-
-func getRootlessRuntimeDir(rootlessUID int) (string, error) {
- return getRootlessRuntimeDirIsolated(
- rootlessRuntimeDirEnvironmentImplementation{
- "/proc/1/comm",
- fmt.Sprintf("/run/user/%d", rootlessUID),
- fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID),
- },
- )
-}
-
-// getRootlessDirInfo returns the parent path of where the storage for containers and
-// volumes will be in rootless mode
-func getRootlessDirInfo(rootlessUID int) (string, string, error) {
- rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
- if err != nil {
- return "", "", err
- }
-
- dataDir, err := homedir.GetDataHome()
- if err == nil {
- return dataDir, rootlessRuntime, nil
- }
-
- home := homedir.Get()
- if home == "" {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
- }
- // runc doesn't like symlinks in the rootfs path, and at least
- // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", "", err
- }
- dataDir = filepath.Join(resolvedHome, ".local", "share")
-
- return dataDir, rootlessRuntime, nil
-}
-
-func getRootlessUID() int {
- uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
- if uidEnv != "" {
- u, _ := strconv.Atoi(uidEnv)
- return u
- }
- return os.Geteuid()
-}
-
-func expandEnvPath(path string, rootlessUID int) (string, error) {
- path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
- return filepath.Clean(os.ExpandEnv(path)), nil
-}
-
-func DefaultConfigFile(rootless bool) (string, error) {
- if defaultConfigFileSet || !rootless {
return defaultConfigFile, nil
}
@@ -181,7 +55,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
fi, err := os.Stat(configFile)
if err != nil {
if !os.IsNotExist(err) {
- fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+ logrus.Warningf("Failed to read %s %v\n", configFile, err.Error())
}
return
}
@@ -192,7 +66,10 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
return
}
- ReloadConfigurationFile(configFile, storeOptions)
+ if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+ logrus.Warningf("Failed to reload %q %v\n", configFile, err)
+ return
+ }
prevReloadConfig.storeOptions = storeOptions
prevReloadConfig.mod = mtime
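
The surviving expandEnvPath keeps two behaviors: the literal $UID token is substituted before os.ExpandEnv runs, and symlinks are resolved when the path exists, falling back to filepath.Clean when it does not. A self-contained approximation (the MY_DATA variable is invented for the demo):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// expandEnvPath approximates the helper in types/utils.go: substitute $UID,
// expand remaining environment variables, then resolve symlinks if possible.
func expandEnvPath(path string, uid int) string {
	path = strings.ReplaceAll(path, "$UID", strconv.Itoa(uid))
	path = os.ExpandEnv(path)
	if resolved, err := filepath.EvalSymlinks(path); err == nil {
		return resolved
	}
	return filepath.Clean(path) // the path may not exist yet
}

func main() {
	os.Setenv("MY_DATA", "/tmp/data") // illustrative variable
	fmt.Println(expandEnvPath("$MY_DATA/containers-user-$UID/storage", 1000))
	// /tmp/data/containers-user-1000/storage (resolved if it exists)
}
```
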
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 3ada41f73..09919394c 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -1,18 +1,21 @@
+//go:build linux
+
package storage
import (
+ "fmt"
"os"
"os/user"
- "path/filepath"
"strconv"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
- libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
- "github.com/pkg/errors"
+ securejoin "github.com/cyphar/filepath-securejoin"
+ libcontainerUser "github.com/moby/sys/user"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
)
// getAdditionalSubIDs looks up the additional IDs configured for
@@ -43,7 +46,7 @@ func getAdditionalSubIDs(username string) (*idSet, *idSet, error) {
}
mappings, err := idtools.NewIDMappings(username, username)
if err != nil {
- logrus.Errorf("cannot find mappings for user %q: %v", username, err)
+ logrus.Errorf("Cannot find mappings for user %q: %v", username, err)
} else {
uids = getHostIDs(mappings.UIDs())
gids = getHostIDs(mappings.GIDs())
@@ -78,43 +81,66 @@ func (s *store) getAvailableIDs() (*idSet, *idSet, error) {
return u, g, nil
}
+// nobodyUser is the UID and GID of the "nobody" user. Its value is
+// hardcoded for simplicity.
+const nobodyUser = 65534
+
// parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
// /etc/group files.
func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+ var (
+ passwd *os.File
+ group *os.File
+ size int
+ err error
+ )
if passwdFile == "" {
- passwdFile = filepath.Join(containerMount, "etc/passwd")
- }
- if groupFile == "" {
- groupFile = filepath.Join(groupFile, "etc/group")
+ passwd, err = secureOpen(containerMount, "/etc/passwd")
+ } else {
+ // User-specified override from a volume. Will not be in
+ // container root.
+ passwd, err = os.Open(passwdFile)
}
-
- size := 0
-
- users, err := libcontainerUser.ParsePasswdFile(passwdFile)
if err == nil {
- for _, u := range users {
- // Skip the "nobody" user otherwise we end up with 65536
- // ids with most images
- if u.Name == "nobody" {
- continue
- }
- if u.Uid > size {
- size = u.Uid
- }
- if u.Gid > size {
- size = u.Gid
+ defer passwd.Close()
+
+ users, err := libcontainerUser.ParsePasswd(passwd)
+ if err == nil {
+ for _, u := range users {
+ // Skip the "nobody" user otherwise we end up with 65536
+ // ids with most images
+ if u.Name == "nobody" || u.Name == "nogroup" {
+ continue
+ }
+ if u.Uid > size && u.Uid != nobodyUser {
+ size = u.Uid + 1
+ }
+ if u.Gid > size && u.Gid != nobodyUser {
+ size = u.Gid + 1
+ }
}
}
}
- groups, err := libcontainerUser.ParseGroupFile(groupFile)
+ if groupFile == "" {
+ group, err = secureOpen(containerMount, "/etc/group")
+ } else {
+ // User-specified override from a volume. Will not be in
+ // container root.
+ group, err = os.Open(groupFile)
+ }
if err == nil {
- for _, g := range groups {
- if g.Name == "nobody" {
- continue
- }
- if g.Gid > size {
- size = g.Gid
+ defer group.Close()
+
+ groups, err := libcontainerUser.ParseGroup(group)
+ if err == nil {
+ for _, g := range groups {
+ if g.Name == "nobody" || g.Name == "nogroup" {
+ continue
+ }
+ if g.Gid > size && g.Gid != nobodyUser {
+ size = g.Gid + 1
+ }
}
}
}
@@ -123,16 +149,9 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
}
// getMaxSizeFromImage returns the maximum ID used by the specified image.
-// The layer stores must be already locked.
-func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
- lstore, err := s.LayerStore()
- if err != nil {
- return 0, err
- }
- lstores, err := s.ROLayerStores()
- if err != nil {
- return 0, err
- }
+// On entry, rlstore must be locked for writing, and lstores must be locked for reading.
+func (s *store) getMaxSizeFromImage(image *Image, rlstore rwLayerStore, lstores []roLayerStore, passwdFile, groupFile string) (_ uint32, retErr error) {
+ layerStores := append([]roLayerStore{rlstore}, lstores...)
size := uint32(0)
@@ -140,7 +159,7 @@ func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFi
layerName := image.TopLayer
outer:
for {
- for _, ls := range append([]ROLayerStore{lstore}, lstores...) {
+ for _, ls := range layerStores {
layer, err := ls.Get(layerName)
if err != nil {
continue
@@ -164,12 +183,7 @@ outer:
}
continue outer
}
- return 0, errors.Errorf("cannot find layer %q", layerName)
- }
-
- rlstore, err := s.LayerStore()
- if err != nil {
- return 0, err
+ return 0, fmt.Errorf("cannot find layer %q", layerName)
}
layerOptions := &LayerOptions{
@@ -183,11 +197,19 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
- clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
+ clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
if err != nil {
return 0, err
}
- defer rlstore.Delete(clayer.ID)
+ defer func() {
+ if err2 := rlstore.Delete(clayer.ID); err2 != nil {
+ if retErr == nil {
+ retErr = fmt.Errorf("deleting temporary layer %#v: %w", clayer.ID, err2)
+ } else {
+ logrus.Errorf("Error deleting temporary layer %#v: %v", clayer.ID, err2)
+ }
+ }
+ }()
mountOptions := drivers.MountOpts{
MountLabel: "",
@@ -200,7 +222,15 @@ outer:
if err != nil {
return 0, err
}
- defer rlstore.Unmount(clayer.ID, true)
+ defer func() {
+ if _, err2 := rlstore.unmount(clayer.ID, true, false); err2 != nil {
+ if retErr == nil {
+ retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2)
+ } else {
+ logrus.Errorf("Error unmounting temporary layer %#v: %v", clayer.ID, err2)
+ }
+ }
+ }()
userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile)
if userFilesSize > size {
@@ -211,7 +241,8 @@ outer:
}
// getAutoUserNS creates an automatic user namespace
-func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
+// If image != nil, then on entry rlstore must be locked for writing, and lstores must be locked for reading.
+func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rlstore rwLayerStore, lstores []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) {
requestedSize := uint32(0)
initialSize := uint32(1)
if options.Size > 0 {
@@ -223,10 +254,10 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
availableUIDs, availableGIDs, err := s.getAvailableIDs()
if err != nil {
- return nil, nil, errors.Wrapf(err, "cannot read mappings")
+ return nil, nil, fmt.Errorf("cannot read mappings: %w", err)
}
- // Look every container that is using a user namespace and store
+ // Look at every container that is using a user namespace and store
// the intervals that are already used.
containers, err := s.Containers()
if err != nil {
@@ -250,7 +281,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
size = s.autoNsMinSize
}
if image != nil {
- sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
+ sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile)
if err != nil {
return nil, nil, err
}
@@ -259,7 +290,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
}
}
if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
- return nil, nil, errors.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
+ return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize)
}
}
@@ -300,3 +331,14 @@ func getAutoUserNSIDMappings(
gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...)
return uidMap, gidMap, nil
}
+
+// secureOpen securely opens (read-only) a file inside a container mount.
+func secureOpen(containerMount, file string) (*os.File, error) {
+ tmpFile, err := securejoin.OpenInRoot(containerMount, file)
+ if err != nil {
+ return nil, err
+ }
+ defer tmpFile.Close()
+
+ return securejoin.Reopen(tmpFile, unix.O_RDONLY)
+}
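
The new secureOpen helper builds on github.com/cyphar/filepath-securejoin: OpenInRoot resolves the path with the container mount as the root, so symlinks inside the image cannot escape it, and Reopen converts the handle into an ordinary read-only *os.File. A hedged usage sketch (the mount path is made up):

```go
package main

import (
	"bufio"
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	const mount = "/var/lib/containers/storage/overlay/abc/merged" // hypothetical

	// OpenInRoot walks "/etc/passwd" with mount as the root, so a symlink
	// like /etc/passwd -> ../../host/etc/passwd cannot escape the mount.
	handle, err := securejoin.OpenInRoot(mount, "/etc/passwd")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer handle.Close()

	// Reopen turns the handle into a normal read-only file descriptor.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		fmt.Println("reopen:", err)
		return
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
```
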
diff --git a/vendor/github.com/containers/storage/userns_unsupported.go b/vendor/github.com/containers/storage/userns_unsupported.go
new file mode 100644
index 000000000..e37c18fe4
--- /dev/null
+++ b/vendor/github.com/containers/storage/userns_unsupported.go
@@ -0,0 +1,14 @@
+//go:build !linux
+
+package storage
+
+import (
+ "errors"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/types"
+)
+
+func (s *store) getAutoUserNS(_ *types.AutoUserNsOptions, _ *Image, _ rwLayerStore, _ []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) {
+ return nil, nil, errors.New("user namespaces are not supported on this platform")
+}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 80d56041b..c61d79837 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -2,6 +2,7 @@ package storage
import (
"fmt"
+ "slices"
"github.com/containers/storage/types"
)
@@ -11,19 +12,9 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap)
}
-// GetRootlessRuntimeDir returns the runtime directory when running as non root
-func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
- return types.GetRootlessRuntimeDir(rootlessUID)
-}
-
-// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
-func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) {
- return types.DefaultStoreOptionsAutoDetectUID()
-}
-
-// DefaultStoreOptions returns the default storage ops for containers
-func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) {
- return types.DefaultStoreOptions(rootless, rootlessUID)
+// DefaultStoreOptions returns the default storage options for containers
+func DefaultStoreOptions() (types.StoreOptions, error) {
+ return types.DefaultStoreOptions()
}
func validateMountOptions(mountOptions []string) error {
@@ -40,3 +31,25 @@ func validateMountOptions(mountOptions []string) error {
}
return nil
}
+
+func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
+ var result []string
+ switch op {
+ case setNames:
+ // ignore all old names and just return new names
+ result = opParameters
+ case removeNames:
+ // remove given names from old names
+ result = make([]string, 0, len(oldNames))
+ for _, name := range oldNames {
+ if !slices.Contains(opParameters, name) {
+ result = append(result, name)
+ }
+ }
+ case addNames:
+ result = slices.Concat(opParameters, oldNames)
+ default:
+ return result, errInvalidUpdateNameOperation
+ }
+ return dedupeStrings(result), nil
+}
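
applyNameOperation leans on the Go 1.21+ slices package for the three name operations. A compact, self-contained rendering of the same logic (the op constants and dedupe helper are local stand-ins for the store's unexported ones):

```go
package main

import (
	"fmt"
	"slices"
)

type op int

const (
	setNames op = iota
	addNames
	removeNames
)

// dedupe keeps the first occurrence of each name, preserving order; it
// stands in for the unexported dedupeStrings helper used by the diff.
func dedupe(in []string) []string {
	seen := make(map[string]bool, len(in))
	out := make([]string, 0, len(in))
	for _, s := range in {
		if !seen[s] {
			seen[s] = true
			out = append(out, s)
		}
	}
	return out
}

// apply mirrors applyNameOperation: set replaces, remove filters, add prepends.
func apply(old, params []string, o op) []string {
	switch o {
	case setNames:
		return dedupe(slices.Clone(params))
	case removeNames:
		out := make([]string, 0, len(old))
		for _, n := range old {
			if !slices.Contains(params, n) {
				out = append(out, n)
			}
		}
		return out
	case addNames:
		return dedupe(slices.Concat(params, old))
	}
	return old
}

func main() {
	old := []string{"web", "api"}
	fmt.Println(apply(old, []string{"db"}, addNames))     // [db web api]
	fmt.Println(apply(old, []string{"api"}, removeNames)) // [web]
	fmt.Println(apply(old, []string{"only"}, setNames))   // [only]
}
```
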
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
index e843a4613..147f756fe 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -111,14 +111,13 @@ type Conn struct {
}
}
-// New establishes a connection to any available bus and authenticates.
-// Callers should call Close() when done with the connection.
-// Deprecated: use NewWithContext instead
+// Deprecated: use NewWithContext instead.
func New() (*Conn, error) {
return NewWithContext(context.Background())
}
-// NewWithContext same as New with context
+// NewWithContext establishes a connection to any available bus and authenticates.
+// Callers should call Close() when done with the connection.
func NewWithContext(ctx context.Context) (*Conn, error) {
conn, err := NewSystemConnectionContext(ctx)
if err != nil && os.Geteuid() == 0 {
@@ -127,44 +126,41 @@ func NewWithContext(ctx context.Context) (*Conn, error) {
return conn, err
}
-// NewSystemConnection establishes a connection to the system bus and authenticates.
-// Callers should call Close() when done with the connection
-// Deprecated: use NewSystemConnectionContext instead
+// Deprecated: use NewSystemConnectionContext instead.
func NewSystemConnection() (*Conn, error) {
return NewSystemConnectionContext(context.Background())
}
-// NewSystemConnectionContext same as NewSystemConnection with context
+// NewSystemConnectionContext establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection.
func NewSystemConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate)
})
}
-// NewUserConnection establishes a connection to the session bus and
-// authenticates. This can be used to connect to systemd user instances.
-// Callers should call Close() when done with the connection.
-// Deprecated: use NewUserConnectionContext instead
+// Deprecated: use NewUserConnectionContext instead.
func NewUserConnection() (*Conn, error) {
return NewUserConnectionContext(context.Background())
}
-// NewUserConnectionContext same as NewUserConnection with context
+// NewUserConnectionContext establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
func NewUserConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate)
})
}
-// NewSystemdConnection establishes a private, direct connection to systemd.
-// This can be used for communicating with systemd without a dbus daemon.
-// Callers should call Close() when done with the connection.
-// Deprecated: use NewSystemdConnectionContext instead
+// Deprecated: use NewSystemdConnectionContext instead.
func NewSystemdConnection() (*Conn, error) {
return NewSystemdConnectionContext(context.Background())
}
-// NewSystemdConnectionContext same as NewSystemdConnection with context
+// NewSystemdConnectionContext establishes a private, direct connection to systemd.
+// This can be used for communicating with systemd without a dbus daemon.
+// Callers should call Close() when done with the connection.
func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd.
@@ -174,12 +170,17 @@ func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) {
})
}
-// Close closes an established connection
+// Close closes an established connection.
func (c *Conn) Close() {
c.sysconn.Close()
c.sigconn.Close()
}
+// Connected returns whether conn is connected
+func (c *Conn) Connected() bool {
+ return c.sysconn.Connected() && c.sigconn.Connected()
+}
+
// NewConnection establishes a connection to a bus using a caller-supplied function.
// This allows connecting to remote buses through a user-supplied mechanism.
// The supplied function may be called multiple times, and should return independent connections.
@@ -217,7 +218,7 @@ func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
// interface. The value is returned in its string representation, as defined at
-// https://developer.gnome.org/glib/unstable/gvariant-text.html
+// https://developer.gnome.org/glib/unstable/gvariant-text.html.
func (c *Conn) GetManagerProperty(prop string) (string, error) {
variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
if err != nil {
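
The go-systemd hunks mostly move the doc comments from the deprecated wrappers onto the context-aware functions, which are now the documented entry points. Typical use of those entry points looks roughly like this (the unit name is illustrative, and a running systemd is required):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// NewWithContext connects to an available bus and authenticates;
	// Close must be called when done with the connection.
	conn, err := dbus.NewWithContext(ctx)
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer conn.Close()

	// The channel receives the job result ("done", "failed", ...).
	results := make(chan string, 1)
	if _, err := conn.RestartUnitContext(ctx, "example.service", "replace", results); err != nil {
		fmt.Println("restart:", err)
		return
	}
	fmt.Println("job result:", <-results)
}
```
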
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
index 01879ba15..074148cb4 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
@@ -73,7 +73,12 @@ func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args
return jobID, nil
}
-// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// Deprecated: use StartUnitContext instead.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.StartUnitContext(context.Background(), name, mode, ch)
+}
+
+// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise
// specified by the mode string).
//
// Takes the unit to activate, plus a mode string. The mode needs to be one of
@@ -103,137 +108,124 @@ func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args
// should not be considered authoritative.
//
// If an error does occur, it will be returned to the user alongside a job ID of 0.
-// Deprecated: use StartUnitContext instead
-func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
- return c.StartUnitContext(context.Background(), name, mode, ch)
-}
-
-// StartUnitContext same as StartUnit with context
func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
}
-// StopUnit is similar to StartUnit but stops the specified unit rather
-// than starting it.
-// Deprecated: use StopUnitContext instead
+// Deprecated: use StopUnitContext instead.
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
return c.StopUnitContext(context.Background(), name, mode, ch)
}
-// StopUnitContext same as StopUnit with context
+// StopUnitContext is similar to StartUnitContext, but stops the specified unit
+// rather than starting it.
func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
}
-// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
-// Deprecated: use ReloadUnitContext instead
+// Deprecated: use ReloadUnitContext instead.
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
return c.ReloadUnitContext(context.Background(), name, mode, ch)
}
-// ReloadUnitContext same as ReloadUnit with context
+// ReloadUnitContext reloads a unit. Reloading is done only if the unit
+// is already running, and fails otherwise.
func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
}
-// RestartUnit restarts a service. If a service is restarted that isn't
-// running it will be started.
-// Deprecated: use RestartUnitContext instead
+// Deprecated: use RestartUnitContext instead.
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.RestartUnitContext(context.Background(), name, mode, ch)
}
-// RestartUnitContext same as RestartUnit with context
+// RestartUnitContext restarts a service. If the service isn't running, it
+// will be started.
func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
}
-// TryRestartUnit is like RestartUnit, except that a service that isn't running
-// is not affected by the restart.
-// Deprecated: use TryRestartUnitContext instead
+// Deprecated: use TryRestartUnitContext instead.
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.TryRestartUnitContext(context.Background(), name, mode, ch)
}
-// TryRestartUnitContext same as TryRestartUnit with context
+// TryRestartUnitContext is like RestartUnitContext, except that a service that
+// isn't running is not affected by the restart.
func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}
-// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart
-// otherwise.
-// Deprecated: use ReloadOrRestartUnitContext instead
+// Deprecated: use ReloadOrRestartUnitContext instead.
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch)
}
-// ReloadOrRestartUnitContext same as ReloadOrRestartUnit with context
+// ReloadOrRestartUnitContext attempts a reload if the unit supports it, and
+// uses a restart otherwise.
func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}
-// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try"
-// flavored restart otherwise.
-// Deprecated: use ReloadOrTryRestartUnitContext instead
+// Deprecated: use ReloadOrTryRestartUnitContext instead.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch)
}
-// ReloadOrTryRestartUnitContext same as ReloadOrTryRestartUnit with context
+// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it,
+// and uses a "Try"-flavored restart otherwise.
func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
}
-// StartTransientUnit() may be used to create and start a transient unit, which
-// will be released as soon as it is not running or referenced anymore or the
-// system is rebooted. name is the unit name including suffix, and must be
-// unique. mode is the same as in StartUnit(), properties contains properties
-// of the unit.
-// Deprecated: use StartTransientUnitContext instead
+// Deprecated: use StartTransientUnitContext instead.
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch)
}
-// StartTransientUnitContext same as StartTransientUnit with context
+// StartTransientUnitContext may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnitContext; properties contains
+// properties of the unit.
func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) {
return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
}
-// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
-// processes are killed.
-// Deprecated: use KillUnitContext instead
+// Deprecated: use KillUnitContext instead.
func (c *Conn) KillUnit(name string, signal int32) {
c.KillUnitContext(context.Background(), name, signal)
}
-// KillUnitContext same as KillUnit with context
+// KillUnitContext takes the unit name and a UNIX signal number to send.
+// All of the unit's processes are killed.
func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) {
c.KillUnitWithTarget(ctx, name, All, signal)
}
-// KillUnitWithTarget is like KillUnitContext, but allows you to specify which process in the unit to send the signal to
+// KillUnitWithTarget is like KillUnitContext, but allows you to specify which
+// process in the unit to send the signal to.
func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error {
return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store()
}
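
A sketch of the difference between the two kill entry points; dbus.Main is assumed here to be the package's Who constant for the unit's main process, next to the dbus.All used by KillUnitContext:

```go
// Hypothetical helper: signal only the unit's main process. KillUnitContext
// would instead signal every process in the unit's cgroup.
func termMainProcess(ctx context.Context, conn *dbus.Conn, unit string) error {
	return conn.KillUnitWithTarget(ctx, unit, dbus.Main, int32(syscall.SIGTERM))
}
```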
-// ResetFailedUnit resets the "failed" state of a specific unit.
-// Deprecated: use ResetFailedUnitContext instead
+// Deprecated: use ResetFailedUnitContext instead.
func (c *Conn) ResetFailedUnit(name string) error {
return c.ResetFailedUnitContext(context.Background(), name)
}
-// ResetFailedUnitContext same as ResetFailedUnit with context
+// ResetFailedUnitContext resets the "failed" state of a specific unit.
func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error {
return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
}
-// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`.
-// Deprecated: use SystemStateContext instead
+// Deprecated: use SystemStateContext instead.
func (c *Conn) SystemState() (*Property, error) {
return c.SystemStateContext(context.Background())
}
-// SystemStateContext same as SystemState with context
+// SystemStateContext returns the systemd state. Equivalent to
+// systemctl is-system-running.
func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) {
var err error
var prop dbus.Variant
@@ -247,7 +239,7 @@ func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) {
return &Property{Name: "SystemState", Value: prop}, nil
}
-// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface
+// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface.
func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
var err error
var props map[string]dbus.Variant
@@ -270,36 +262,36 @@ func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInte
return out, nil
}
-// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
-// Deprecated: use GetUnitPropertiesContext instead
+// Deprecated: use GetUnitPropertiesContext instead.
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
return c.GetUnitPropertiesContext(context.Background(), unit)
}
-// GetUnitPropertiesContext same as GetUnitPropertiesContext with context
+// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of
+// its dbus object properties.
func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
path := unitPath(unit)
return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
}
-// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
-// Deprecated: use GetUnitPathPropertiesContext instead
+// Deprecated: use GetUnitPathPropertiesContext instead.
func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
return c.GetUnitPathPropertiesContext(context.Background(), path)
}
-// GetUnitPathPropertiesContext same as GetUnitPathProperties with context
+// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all
+// of its dbus object properties.
func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) {
return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
}
-// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties.
-// Deprecated: use GetAllPropertiesContext instead
+// Deprecated: use GetAllPropertiesContext instead.
func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) {
return c.GetAllPropertiesContext(context.Background(), unit)
}
-// GetAllPropertiesContext same as GetAllProperties with context
+// GetAllPropertiesContext takes the (unescaped) unit name and returns all of
+// its dbus object properties.
func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
path := unitPath(unit)
return c.getProperties(ctx, path, "")
@@ -323,64 +315,63 @@ func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface strin
return &Property{Name: propertyName, Value: prop}, nil
}
-// Deprecated: use GetUnitPropertyContext instead
+// Deprecated: use GetUnitPropertyContext instead.
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
return c.GetUnitPropertyContext(context.Background(), unit, propertyName)
}
-// GetUnitPropertyContext same as GetUnitProperty with context
+// GetUnitPropertyContext takes an (unescaped) unit name, and a property name,
+// and returns the property value.
func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) {
return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName)
}
-// GetServiceProperty returns property for given service name and property name
-// Deprecated: use GetServicePropertyContext instead
+// Deprecated: use GetServicePropertyContext instead.
func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
return c.GetServicePropertyContext(context.Background(), service, propertyName)
}
-// GetServicePropertyContext same as GetServiceProperty with context
+// GetServicePropertyContext returns the property for the given service name and property name.
func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) {
return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName)
}
-// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
-// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
-// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
-// Deprecated: use GetUnitTypePropertiesContext instead
+// Deprecated: use GetUnitTypePropertiesContext instead.
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType)
}
-// GetUnitTypePropertiesContext same as GetUnitTypeProperties with context
+// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope.
+// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit.
func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) {
path := unitPath(unit)
return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType)
}
-// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Deprecated: use SetUnitPropertiesContext instead.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...)
+}
+
+// SetUnitPropertiesContext may be used to modify certain unit properties at runtime.
// Not all properties may be changed at runtime, but many resource management
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
// instantly, and stored on disk for future boots, unless runtime is true, in which
// case the settings only apply until the next reboot. name is the name of the unit
// to modify. properties are the settings to set, encoded as an array of property
// name and value pairs.
-// Deprecated: use SetUnitPropertiesContext instead
-func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
- return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...)
-}
-
-// SetUnitPropertiesContext same as SetUnitProperties with context
func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error {
return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
}
-// Deprecated: use GetUnitTypePropertyContext instead
+// Deprecated: use GetUnitTypePropertyContext instead.
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName)
}
-// GetUnitTypePropertyContext same as GetUnitTypeProperty with context
+// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type,
+// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext.
func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) {
return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName)
}
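
The property getters above all return a *Property wrapping a dbus.Variant, which the caller has to unwrap. A sketch, assuming an established conn and a placeholder unit; MainPID is a standard Service property carried as a uint32 on the wire:

```go
// Hypothetical helper: fetch a single Service-interface property and
// unwrap the dbus.Variant.
func mainPID(ctx context.Context, conn *dbus.Conn, unit string) (uint32, error) {
	prop, err := conn.GetServicePropertyContext(ctx, unit, "MainPID")
	if err != nil {
		return 0, err
	}
	pid, ok := prop.Value.Value().(uint32)
	if !ok {
		return 0, fmt.Errorf("unexpected type for %s", prop.Name)
	}
	return pid, nil
}
```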
@@ -426,58 +417,78 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
return status, nil
}
-// ListUnits returns an array with all currently loaded units. Note that
-// units may be known by multiple names at the same time, and hence there might
-// be more unit names loaded than actual units behind them.
-// Also note that a unit is only loaded if it is active and/or enabled.
-// Units that are both disabled and inactive will thus not be returned.
-// Deprecated: use ListUnitsContext instead
+// GetUnitByPID returns the unit object path of the unit a process ID
+// belongs to. It takes a UNIX PID and returns the object path. The PID must
+// refer to an existing system process.
+func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) {
+ var result dbus.ObjectPath
+
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result)
+
+ return result, err
+}
+
+// GetUnitNameByPID returns the name of the unit a process ID belongs to. It
+// takes a UNIX PID and returns the unit name. The PID must refer to an
+// existing system process.
+func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) {
+ path, err := c.GetUnitByPID(ctx, pid)
+ if err != nil {
+ return "", err
+ }
+
+ return unitName(path), nil
+}
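
The two new PID lookups compose naturally. A hypothetical round trip, asking systemd which unit the current process belongs to (os.Getpid is purely illustrative; any live system PID works):

```go
func owningUnit(ctx context.Context, conn *dbus.Conn) (string, error) {
	return conn.GetUnitNameByPID(ctx, uint32(os.Getpid()))
}
```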
+
+// Deprecated: use ListUnitsContext instead.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.ListUnitsContext(context.Background())
}
-// ListUnitsContext same as ListUnits with context
+// ListUnitsContext returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+// Also note that a unit is only loaded if it is active and/or enabled.
+// Units that are both disabled and inactive will thus not be returned.
func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
}
-// ListUnitsFiltered returns an array with units filtered by state.
-// It takes a list of units' statuses to filter.
-// Deprecated: use ListUnitsFilteredContext instead
+// Deprecated: use ListUnitsFilteredContext instead.
func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
return c.ListUnitsFilteredContext(context.Background(), states)
}
-// ListUnitsFilteredContext same as ListUnitsFiltered with context
+// ListUnitsFilteredContext returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
}
-// ListUnitsByPatterns returns an array with units.
-// It takes a list of units' statuses and names to filter.
-// Note that units may be known by multiple names at the same time,
-// and hence there might be more unit names loaded than actual units behind them.
-// Deprecated: use ListUnitsByPatternsContext instead
+// Deprecated: use ListUnitsByPatternsContext instead.
func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
return c.ListUnitsByPatternsContext(context.Background(), states, patterns)
}
-// ListUnitsByPatternsContext same as ListUnitsByPatterns with context
+// ListUnitsByPatternsContext returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
}
-// ListUnitsByNames returns an array with units. It takes a list of units'
-// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns
-// method, this method returns statuses even for inactive or non-existing
-// units. Input array should contain exact unit names, but not patterns.
-// Note: Requires systemd v230 or higher
-// Deprecated: use ListUnitsByNamesContext instead
+// Deprecated: use ListUnitsByNamesContext instead.
func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
return c.ListUnitsByNamesContext(context.Background(), units)
}
-// ListUnitsByNamesContext same as ListUnitsByNames with context
+// ListUnitsByNamesContext returns an array with units. It takes a list of units'
+// names and returns a UnitStatus array. Compared to the ListUnitsByPatternsContext
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+//
+// Requires systemd v230 or higher.
func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
}
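
As a sketch of the filtered listing, this hypothetical helper is roughly `systemctl --failed`, assuming the UnitStatus fields shown elsewhere in this file (Name, ActiveState, SubState):

```go
func printFailed(ctx context.Context, conn *dbus.Conn) error {
	// Restrict the listing to units currently in the "failed" state.
	units, err := conn.ListUnitsFilteredContext(ctx, []string{"failed"})
	if err != nil {
		return err
	}
	for _, u := range units {
		fmt.Printf("%s: %s/%s\n", u.Name, u.ActiveState, u.SubState)
	}
	return nil
}
```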
@@ -513,37 +524,43 @@ func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
return files, nil
}
-// ListUnitFiles returns an array of all available units on disk.
-// Deprecated: use ListUnitFilesContext instead
+// Deprecated: use ListUnitFilesContext instead.
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
return c.ListUnitFilesContext(context.Background())
}
-// ListUnitFilesContext same as ListUnitFiles with context
+// ListUnitFilesContext returns an array of all available units on disk.
func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
}
-// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
-// Deprecated: use ListUnitFilesByPatternsContext instead
+// Deprecated: use ListUnitFilesByPatternsContext instead.
func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
}
-// ListUnitFilesByPatternsContext same as ListUnitFilesByPatterns with context
+// ListUnitFilesByPatternsContext returns an array of all available units on disk matching the patterns.
func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
}
type LinkUnitFileChange EnableUnitFileChange
-// LinkUnitFiles() links unit files (that are located outside of the
+// Deprecated: use LinkUnitFilesContext instead.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// LinkUnitFilesContext links unit files (that are located outside of the
// usual unit search paths) into the unit search path.
//
// It takes a list of absolute paths to unit files to link and two
-// booleans. The first boolean controls whether the unit shall be
+// booleans.
+//
+// The first boolean controls whether the unit shall be
// enabled for runtime only (true, /run), or persistently (false,
// /etc).
+//
// The second controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
@@ -551,12 +568,6 @@ type LinkUnitFileChange EnableUnitFileChange
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
-// Deprecated: use LinkUnitFilesContext instead
-func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
- return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
-}
-
-// LinkUnitFilesContext same as LinkUnitFiles with context
func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
@@ -583,8 +594,13 @@ func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime
return changes, nil
}
-// EnableUnitFiles() may be used to enable one or more units in the system (by
-// creating symlinks to them in /etc or /run).
+// Deprecated: use EnableUnitFilesContext instead.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// EnableUnitFilesContext may be used to enable one or more units in the system
+// (by creating symlinks to them in /etc or /run).
//
// It takes a list of unit files to enable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
@@ -599,12 +615,6 @@ func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
-// Deprecated: use EnableUnitFilesContext instead
-func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
- return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
-}
-
-// EnableUnitFilesContext same as EnableUnitFiles with context
func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
var carries_install_info bool
@@ -639,8 +649,13 @@ type EnableUnitFileChange struct {
Destination string // Destination of the symlink
}
-// DisableUnitFiles() may be used to disable one or more units in the system (by
-// removing symlinks to them from /etc or /run).
+// Deprecated: use DisableUnitFilesContext instead.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ return c.DisableUnitFilesContext(context.Background(), files, runtime)
+}
+
+// DisableUnitFilesContext may be used to disable one or more units in the
+// system (by removing symlinks to them from /etc or /run).
//
// It takes a list of unit files to disable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
@@ -651,12 +666,6 @@ type EnableUnitFileChange struct {
// consists of structures with three strings: the type of the change (one of
// symlink or unlink), the file name of the symlink and the destination of the
// symlink.
-// Deprecated: use DisableUnitFilesContext instead
-func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
- return c.DisableUnitFilesContext(context.Background(), files, runtime)
-}
-
-// DisableUnitFilesContext same as DisableUnitFiles with context
func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
@@ -689,21 +698,20 @@ type DisableUnitFileChange struct {
Destination string // Destination of the symlink
}
-// MaskUnitFiles masks one or more units in the system
-//
-// It takes three arguments:
-// * list of units to mask (either just file names or full
-// absolute paths if the unit files are residing outside
-// the usual unit search paths)
-// * runtime to specify whether the unit was enabled for runtime
-// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
-// * force flag
-// Deprecated: use MaskUnitFilesContext instead
+// Deprecated: use MaskUnitFilesContext instead.
func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
return c.MaskUnitFilesContext(context.Background(), files, runtime, force)
}
-// MaskUnitFilesContext same as MaskUnitFiles with context
+// MaskUnitFilesContext masks one or more units in the system.
+//
+// The files argument contains a list of units to mask (either just file names
+// or full absolute paths if the unit files are residing outside the usual unit
+// search paths).
+//
+// The runtime argument is used to specify whether the unit was enabled for
+// runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
@@ -736,20 +744,18 @@ type MaskUnitFileChange struct {
Destination string // Destination of the symlink
}
-// UnmaskUnitFiles unmasks one or more units in the system
-//
-// It takes two arguments:
-// * list of unit files to mask (either just file names or full
-// absolute paths if the unit files are residing outside
-// the usual unit search paths)
-// * runtime to specify whether the unit was enabled for runtime
-// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
-// Deprecated: use UnmaskUnitFilesContext instead
+// Deprecated: use UnmaskUnitFilesContext instead.
func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
return c.UnmaskUnitFilesContext(context.Background(), files, runtime)
}
-// UnmaskUnitFilesContext same as UnmaskUnitFiles with context
+// UnmaskUnitFilesContext unmasks one or more units in the system.
+//
+// It takes the list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit search
+// paths), and a boolean runtime flag to specify whether the unit was enabled
+// for runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
@@ -782,14 +788,13 @@ type UnmaskUnitFileChange struct {
Destination string // Destination of the symlink
}
-// Reload instructs systemd to scan for and reload unit files. This is
-// equivalent to a 'systemctl daemon-reload'.
-// Deprecated: use ReloadContext instead
+// Deprecated: use ReloadContext instead.
func (c *Conn) Reload() error {
return c.ReloadContext(context.Background())
}
-// ReloadContext same as Reload with context
+// ReloadContext instructs systemd to scan for and reload unit files. This is
+// equivalent to systemctl daemon-reload.
func (c *Conn) ReloadContext(ctx context.Context) error {
return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store()
}
@@ -798,12 +803,12 @@ func unitPath(name string) dbus.ObjectPath {
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
}
-// unitName returns the unescaped base element of the supplied escaped path
+// unitName returns the unescaped base element of the supplied escaped path.
func unitName(dpath dbus.ObjectPath) string {
return pathBusUnescape(path.Base(string(dpath)))
}
-// Currently queued job definition
+// JobStatus holds a currently queued job definition.
type JobStatus struct {
Id uint32 // The numeric job id
Unit string // The primary unit name for this job
@@ -813,13 +818,12 @@ type JobStatus struct {
UnitPath dbus.ObjectPath // The unit object path
}
-// ListJobs returns an array with all currently queued jobs
-// Deprecated: use ListJobsContext instead
+// Deprecated: use ListJobsContext instead.
func (c *Conn) ListJobs() ([]JobStatus, error) {
return c.ListJobsContext(context.Background())
}
-// ListJobsContext same as ListJobs with context
+// ListJobsContext returns an array with all currently queued jobs.
func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) {
return c.listJobsInternal(ctx)
}
@@ -847,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
return status, nil
}
+
+// FreezeUnit freezes the cgroup associated with the unit.
+// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2.
+func (c *Conn) FreezeUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store()
+}
+
+// ThawUnit unfreezes the cgroup associated with the unit.
+func (c *Conn) ThawUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store()
+}
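
A hypothetical wrapper pairing the two new calls, under the cgroup v2 restriction noted above: freeze a unit, run a caller-supplied function (for example, to snapshot or inspect the frozen processes), then thaw.

```go
func withFrozenUnit(ctx context.Context, conn *dbus.Conn, unit string, fn func() error) error {
	if err := conn.FreezeUnit(ctx, unit); err != nil {
		return err
	}
	defer conn.ThawUnit(ctx, unit) // best-effort thaw
	return fn()
}
```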
diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE
new file mode 100644
index 000000000..591211595
--- /dev/null
+++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE
@@ -0,0 +1,13 @@
+ Copyright 2018 Anders Rundgren
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
new file mode 100644
index 000000000..92574a3f4
--- /dev/null
+++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
@@ -0,0 +1,71 @@
+//
+// Copyright 2006-2019 WebPKI.org (http://webpki.org).
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This package converts numbers in IEEE-754 double precision into the
+// format specified for JSON in ECMAScript Version 6 and forward.
+// The core application for this is canonicalization:
+// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02
+
+package jsoncanonicalizer
+
+import (
+ "errors"
+ "math"
+ "strconv"
+ "strings"
+)
+
+const invalidPattern uint64 = 0x7ff0000000000000
+
+func NumberToJSON(ieeeF64 float64) (res string, err error) {
+ ieeeU64 := math.Float64bits(ieeeF64)
+
+ // Special case: NaN and Infinity are invalid in JSON
+ if (ieeeU64 & invalidPattern) == invalidPattern {
+ return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16))
+ }
+
+ // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications
+ if ieeeF64 == 0 { // Right, this line takes both -0 and 0
+ return "0", nil
+ }
+
+ // Deal with the sign separately
+ var sign string = ""
+ if ieeeF64 < 0 {
+ ieeeF64 = -ieeeF64
+ sign = "-"
+ }
+
+ // ES6 has a unique "g" format
+ var format byte = 'e'
+ if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 {
+ format = 'f'
+ }
+
+ // The following should do the trick:
+ es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64)
+
+ // Minor cleanup
+ exponent := strings.IndexByte(es6Formatted, 'e')
+ if exponent > 0 {
+ // Go outputs "1e+09" which must be rewritten as "1e+9"
+ if es6Formatted[exponent + 2] == '0' {
+ es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:]
+ }
+ }
+ return sign + es6Formatted, nil
+}
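
A runnable sketch of the ES6 number rules implemented above, assuming the vendored import path (module setups may differ): -0 collapses to "0", values at or above 1e21 switch to exponent form with the leading zero stripped, and NaN is rejected.

```go
package main

import (
	"fmt"
	"math"

	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
)

func main() {
	for _, v := range []float64{1e9, math.Copysign(0, -1), 1e21} {
		s, err := jsoncanonicalizer.NumberToJSON(v)
		fmt.Println(s, err) // "1000000000", "0", "1e+21", each with a nil error
	}
	// NaN and Infinity are invalid in JSON: "null" plus a non-nil error.
	s, err := jsoncanonicalizer.NumberToJSON(math.NaN())
	fmt.Println(s, err)
}
```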
diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
new file mode 100644
index 000000000..661f41055
--- /dev/null
+++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
@@ -0,0 +1,378 @@
+//
+// Copyright 2006-2019 WebPKI.org (http://webpki.org).
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This package transforms JSON data in UTF-8 according to:
+// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02
+
+package jsoncanonicalizer
+
+import (
+ "errors"
+ "container/list"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf16"
+)
+
+type nameValueType struct {
+ name string
+ sortKey []uint16
+ value string
+}
+
+// JSON standard escapes (modulo \u)
+var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'}
+var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'}
+
+// JSON literals
+var literals = []string{"true", "false", "null"}
+
+func Transform(jsonData []byte) (result []byte, e error) {
+
+ // JSON data MUST be UTF-8 encoded
+ var jsonDataLength int = len(jsonData)
+
+ // Current pointer in jsonData
+ var index int = 0
+
+ // "Forward" declarations are needed for closures referring each other
+ var parseElement func() string
+ var parseSimpleType func() string
+ var parseQuotedString func() string
+ var parseObject func() string
+ var parseArray func() string
+
+ var globalError error = nil
+
+ checkError := func(e error) {
+ // We only honor the first reported error
+ if globalError == nil {
+ globalError = e
+ }
+ }
+
+ setError := func(msg string) {
+ checkError(errors.New(msg))
+ }
+
+ isWhiteSpace := func(c byte) bool {
+ return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09
+ }
+
+ nextChar := func() byte {
+ if index < jsonDataLength {
+ c := jsonData[index]
+ if c > 0x7f {
+ setError("Unexpected non-ASCII character")
+ }
+ index++
+ return c
+ }
+ setError("Unexpected EOF reached")
+ return '"'
+ }
+
+ scan := func() byte {
+ for {
+ c := nextChar()
+ if isWhiteSpace(c) {
+ continue
+ }
+ return c
+ }
+ }
+
+ scanFor := func(expected byte) {
+ c := scan()
+ if c != expected {
+ setError("Expected '" + string(expected) + "' but got '" + string(c) + "'")
+ }
+ }
+
+ getUEscape := func() rune {
+ start := index
+ nextChar()
+ nextChar()
+ nextChar()
+ nextChar()
+ if globalError != nil {
+ return 0
+ }
+ u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64)
+ checkError(err)
+ return rune(u16)
+ }
+
+ testNextNonWhiteSpaceChar := func() byte {
+ save := index
+ c := scan()
+ index = save
+ return c
+ }
+
+ decorateString := func(rawUTF8 string) string {
+ var quotedString strings.Builder
+ quotedString.WriteByte('"')
+ CoreLoop:
+ for _, c := range []byte(rawUTF8) {
+ // Is this within the JSON standard escapes?
+ for i, esc := range binaryEscapes {
+ if esc == c {
+ quotedString.WriteByte('\\')
+ quotedString.WriteByte(asciiEscapes[i])
+ continue CoreLoop
+ }
+ }
+ if c < 0x20 {
+ // Other ASCII control characters must be escaped with \uhhhh
+ quotedString.WriteString(fmt.Sprintf("\\u%04x", c))
+ } else {
+ quotedString.WriteByte(c)
+ }
+ }
+ quotedString.WriteByte('"')
+ return quotedString.String()
+ }
+
+ parseQuotedString = func() string {
+ var rawString strings.Builder
+ CoreLoop:
+ for globalError == nil {
+ var c byte
+ if index < jsonDataLength {
+ c = jsonData[index]
+ index++
+ } else {
+ nextChar()
+ break
+ }
+ if (c == '"') {
+ break;
+ }
+ if c < ' ' {
+ setError("Unterminated string literal")
+ } else if c == '\\' {
+ // Escape sequence
+ c = nextChar()
+ if c == 'u' {
+ // The \u escape
+ firstUTF16 := getUEscape()
+ if utf16.IsSurrogate(firstUTF16) {
+ // If the first UTF-16 code unit has a certain value there must be
+ // another succeeding UTF-16 code unit as well
+ if nextChar() != '\\' || nextChar() != 'u' {
+ setError("Missing surrogate")
+ } else {
+ // Output the UTF-32 code point as UTF-8
+ rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape()))
+ }
+ } else {
+ // Single UTF-16 code unit, identical to UTF-32. Output as UTF-8
+ rawString.WriteRune(firstUTF16)
+ }
+ } else if c == '/' {
+ // Benign but useless escape
+ rawString.WriteByte('/')
+ } else {
+ // The JSON standard escapes
+ for i, esc := range asciiEscapes {
+ if esc == c {
+ rawString.WriteByte(binaryEscapes[i])
+ continue CoreLoop
+ }
+ }
+ setError("Unexpected escape: \\" + string(c))
+ }
+ } else {
+ // Just an ordinary ASCII character, or alternatively a UTF-8 byte
+ // outside of ASCII.
+ // Note that properly formatted UTF-8 never clashes with ASCII,
+ // making byte-by-byte search for ASCII break characters work
+ // as expected.
+ rawString.WriteByte(c)
+ }
+ }
+ return rawString.String()
+ }
+
+ parseSimpleType = func() string {
+ var token strings.Builder
+ index--
+ for globalError == nil {
+ c := testNextNonWhiteSpaceChar()
+ if c == ',' || c == ']' || c == '}' {
+ break
+ }
+ c = nextChar()
+ if isWhiteSpace(c) {
+ break
+ }
+ token.WriteByte(c)
+ }
+ if token.Len() == 0 {
+ setError("Missing argument")
+ }
+ value := token.String()
+ // Is it a JSON literal?
+ for _, literal := range literals {
+ if literal == value {
+ return literal
+ }
+ }
+ // Apparently not, so we assume that it is an I-JSON number
+ ieeeF64, err := strconv.ParseFloat(value, 64)
+ checkError(err)
+ value, err = NumberToJSON(ieeeF64)
+ checkError(err)
+ return value
+ }
+
+ parseElement = func() string {
+ switch scan() {
+ case '{':
+ return parseObject()
+ case '"':
+ return decorateString(parseQuotedString())
+ case '[':
+ return parseArray()
+ default:
+ return parseSimpleType()
+ }
+ }
+
+ parseArray = func() string {
+ var arrayData strings.Builder
+ arrayData.WriteByte('[')
+ var next bool = false
+ for globalError == nil && testNextNonWhiteSpaceChar() != ']' {
+ if next {
+ scanFor(',')
+ arrayData.WriteByte(',')
+ } else {
+ next = true
+ }
+ arrayData.WriteString(parseElement())
+ }
+ scan()
+ arrayData.WriteByte(']')
+ return arrayData.String()
+ }
+
+ lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool {
+ // Find the minimum length of the sortKeys
+ oldSortKey := e.Value.(nameValueType).sortKey
+ minLength := len(oldSortKey)
+ if minLength > len(sortKey) {
+ minLength = len(sortKey)
+ }
+ for q := 0; q < minLength; q++ {
+ diff := int(sortKey[q]) - int(oldSortKey[q])
+ if diff < 0 {
+ // Smaller => Precedes
+ return true
+ } else if diff > 0 {
+ // Bigger => No match
+ return false
+ }
+ // Still equal => Continue
+ }
+ // The sortKeys compared equal up to minLength
+ if len(sortKey) < len(oldSortKey) {
+ // Shorter => Precedes
+ return true
+ }
+ if len(sortKey) == len(oldSortKey) {
+ setError("Duplicate key: " + e.Value.(nameValueType).name)
+ }
+ // Longer => No match
+ return false
+ }
+
+ parseObject = func() string {
+ nameValueList := list.New()
+ var next bool = false
+ CoreLoop:
+ for globalError == nil && testNextNonWhiteSpaceChar() != '}' {
+ if next {
+ scanFor(',')
+ }
+ next = true
+ scanFor('"')
+ rawUTF8 := parseQuotedString()
+ if globalError != nil {
+ break
+ }
+ // Sort keys on UTF-16 code units
+ // Since UTF-8 doesn't have endianness this is just a value transformation
+ // In the Go case the transformation is UTF-8 => UTF-32 => UTF-16
+ sortKey := utf16.Encode([]rune(rawUTF8))
+ scanFor(':')
+ nameValue := nameValueType{rawUTF8, sortKey, parseElement()}
+ for e := nameValueList.Front(); e != nil; e = e.Next() {
+ // Check if the key is smaller than a previous key
+ if lexicographicallyPrecedes(sortKey, e) {
+ // Precedes => Insert before and exit sorting
+ nameValueList.InsertBefore(nameValue, e)
+ continue CoreLoop
+ }
+ // Continue searching for a possibly succeeding sortKey
+ // (which is straightforward since the list is ordered)
+ }
+ // The sortKey is either the first or is succeeding all previous sortKeys
+ nameValueList.PushBack(nameValue)
+ }
+ // Scan away '}'
+ scan()
+ // Now everything is sorted so we can properly serialize the object
+ var objectData strings.Builder
+ objectData.WriteByte('{')
+ next = false
+ for e := nameValueList.Front(); e != nil; e = e.Next() {
+ if next {
+ objectData.WriteByte(',')
+ }
+ next = true
+ nameValue := e.Value.(nameValueType)
+ objectData.WriteString(decorateString(nameValue.name))
+ objectData.WriteByte(':')
+ objectData.WriteString(nameValue.value)
+ }
+ objectData.WriteByte('}')
+ return objectData.String()
+ }
+
+ /////////////////////////////////////////////////
+ // This is where Transform actually begins... //
+ /////////////////////////////////////////////////
+ var transformed string
+
+ if testNextNonWhiteSpaceChar() == '[' {
+ scan()
+ transformed = parseArray()
+ } else {
+ scanFor('{')
+ transformed = parseObject()
+ }
+ for index < jsonDataLength {
+ if !isWhiteSpace(jsonData[index]) {
+ setError("Improperly terminated JSON object")
+ break
+ }
+ index++
+ }
+ return []byte(transformed), globalError
+}
\ No newline at end of file
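
To see the canonicalizer end to end, a minimal sketch assuming the vendored import path: keys come back sorted on UTF-16 code units, whitespace is stripped, and numbers are rewritten in ES6 form, per the JCS draft referenced above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
)

func main() {
	out, err := jsoncanonicalizer.Transform([]byte(`{"b": 2, "a": [true, 1e9]}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"a":[true,1000000000],"b":2}
}
```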
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
deleted file mode 100644
index 3938f3834..000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (C) 2017 SUSE LLC. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-language: go
-go:
- - 1.7.x
- - 1.8.x
- - tip
-
-os:
- - linux
- - osx
-
-script:
- - go test -cover -v ./...
-
-notifications:
- email: false
diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
new file mode 100644
index 000000000..04b5685ab
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
@@ -0,0 +1,178 @@
+# Changelog #
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+## [Unreleased] ##
+
+## [0.3.4] - 2024-10-09 ##
+
+### Fixed ###
+- Previously, some testing mocks we had resulted in us doing `import "testing"`
+ in non-`_test.go` code, which made some downstreams like Kubernetes unhappy.
+ This has been fixed. (#32)
+
+## [0.3.3] - 2024-09-30 ##
+
+### Fixed ###
+- The mode and owner verification logic in `MkdirAll` has been removed. This
+ was originally intended to protect against some theoretical attacks but upon
+ further consideration these protections don't actually buy us anything and
+ they were causing spurious errors with more complicated filesystem setups.
+- The "is the created directory empty" logic in `MkdirAll` has also been
+ removed. This was not causing us issues yet, but some pseudofilesystems (such
+ as `cgroup`) create non-empty directories and so this logic would've been
+ wrong for such cases.
+
+## [0.3.2] - 2024-09-13 ##
+
+### Changed ###
+- Passing the `S_ISUID` or `S_ISGID` modes to `MkdirAllInRoot` will now return
+ an explicit error saying that those bits are ignored by `mkdirat(2)`. In the
+ past a different error was returned, but since the silent ignoring behaviour
+ is codified in the man pages a more explicit error seems apt. While silently
+ ignoring these bits would be the most compatible option, it could lead to
+ users thinking their code sets these bits when it doesn't. Programs that need
+ to deal with compatibility can mask the bits themselves. (#23, #25)
+
+### Fixed ###
+- If a directory has `S_ISGID` set, then all child directories will have
+ `S_ISGID` set when created and a different gid will be used for any inode
+ created under the directory. Previously, the "expected owner and mode"
+ validation in `securejoin.MkdirAll` did not correctly handle this. We now
+ correctly handle this case. (#24, #25)
+
+## [0.3.1] - 2024-07-23 ##
+
+### Changed ###
+- By allowing `Open(at)InRoot` to opt-out of the extra work done by `MkdirAll`
+ to do the necessary "partial lookups", `Open(at)InRoot` now does less work
+ for both implementations (resulting in a many-fold decrease in the number of
+ operations for `openat2`, and a modest improvement for non-`openat2`) and is
+ far more guaranteed to match the correct `openat2(RESOLVE_IN_ROOT)`
+ behaviour.
+- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this
+ effectively just means that we no longer risk getting spurious errors during
+ rename races. However, for our hardened procfs handler, this in theory should
+ prevent mount attacks from tricking us when doing magic-link readlinks (even
+ when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still
+ potentially vulnerable to those kinds of somewhat-esoteric attacks.
+
+ Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath]
+ but it seems incredibly unlikely anyone is using `filepath-securejoin` on a
+ pre-2011 kernel.
+
+### Fixed ###
+- Several improvements were made to the errors returned by `Open(at)InRoot` and
+ `MkdirAll` when dealing with invalid paths under the emulated (ie.
+ non-`openat2`) implementation. Previously, some paths would return the wrong
+ error (`ENOENT` when the last component was a non-directory), and other paths
+ would be returned as though they were acceptable (trailing-slash components
+ after a non-directory would be ignored by `Open(at)InRoot`).
+
+ These changes were done to match `openat2`'s behaviour and are purely a
+ consistency fix (most users are going to be using `openat2` anyway).
+
+[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d
+
+## [0.3.0] - 2024-07-11 ##
+
+### Added ###
+- A new set of `*os.File`-based APIs have been added. These are adapted from
+ [libpathrs][] and we strongly suggest using them if possible (as they provide
+ far more protection against attacks than `SecureJoin`):
+
+ - `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File`
+ handle to the path. Note that the handle returned is an `O_PATH` handle,
+ which cannot be used for reading or writing (as well as some other
+ operations -- [see open(2) for more details][open.2])
+
+ - `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade
+ it to a regular handle. This can also be used with non-`O_PATH` handles,
+ but `O_PATH` is the most obvious application.
+
+ - `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to
+ create a directory tree within a rootfs.
+
+ As these are new APIs, they may change in the future. However, they should be
+ safe to start migrating to as we have extensive tests ensuring they behave
+ correctly and are safe against various races and other attacks.
+
+[libpathrs]: https://github.com/openSUSE/libpathrs
+[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html
+
+## [0.2.5] - 2024-05-03 ##
+
+### Changed ###
+- Some minor changes were made to how lexical components (like `..` and `.`)
+ are handled during path generation in `SecureJoin`. There is no behaviour
+ change as a result of this fix (the resulting paths are the same).
+
+### Fixed ###
+- The error returned when we hit a symlink loop now references the correct
+ path. (#10)
+
+## [0.2.4] - 2023-09-06 ##
+
+### Security ###
+- This release fixes a potential security issue in filepath-securejoin when
+ used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate
+ paths outside of the provided rootfs in certain cases), as well as improving
+ the overall behaviour of filepath-securejoin when dealing with Windows paths
+ that contain volume names. Thanks to Paulo Gomes for discovering and fixing
+ these issues.
+
+### Fixed ###
+- Switch to GitHub Actions for CI so we can test on Windows as well as Linux
+ and macOS.
+
+[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8
+
+## [0.2.3] - 2021-06-04 ##
+
+### Changed ###
+- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency
+ on `github.com/pkg/errors`.
+
+## [0.2.2] - 2018-09-05 ##
+
+### Changed ###
+- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own
+ (internal) error. This allows callers to more easily use `errors.Is` to check
+ for this case.
+
+## [0.2.1] - 2018-09-05 ##
+
+### Fixed ###
+- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR`
+ properly within `SecureJoin`.
+
+## [0.2.0] - 2017-07-19 ##
+
+We now have 100% test coverage!
+
+### Added ###
+- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new
+ tests) or for implementing custom handling of lookup operations (such as for
+ rootless containers, where work is necessary to access directories with weird
+ modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`).
+
+## 0.1.0 - 2017-07-19
+
+This is our first release of `github.com/cyphar/filepath-securejoin`,
+containing a full implementation with a coverage of 93.5% (the only missing
+cases are the error cases, which are hard to mocktest at the moment).
+
+[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...HEAD
+[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4
+[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3
+[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2
+[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1
+[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0
+[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5
+[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4
+[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3
+[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2
+[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0
diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
index bec842f29..cb1ab88da 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/LICENSE
+++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
@@ -1,5 +1,5 @@
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
-Copyright (C) 2017 SUSE LLC. All rights reserved.
+Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
index 49b2baa9f..eaeb53fcd 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/README.md
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -1,23 +1,30 @@
## `filepath-securejoin` ##
-[](https://travis-ci.org/cyphar/filepath-securejoin)
+[](https://pkg.go.dev/github.com/cyphar/filepath-securejoin)
+[](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
-An implementation of `SecureJoin`, a [candidate for inclusion in the Go
-standard library][go#20126]. The purpose of this function is to be a "secure"
-alternative to `filepath.Join`, and in particular it provides certain
-guarantees that are not provided by `filepath.Join`.
+### Old API ###
-This is the function prototype:
+This library was originally just an implementation of `SecureJoin` which was
+[intended to be included in the Go standard library][go#20126] as a safer
+`filepath.Join` that would restrict the path lookup to be inside a root
+directory.
-```go
-func SecureJoin(root, unsafePath string) (string, error)
-```
+The implementation was based on code that existed in several container
+runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers
+that can modify path components after `SecureJoin` returns and before the
+caller uses the path, allowing for some fairly trivial TOCTOU attacks.
-This library **guarantees** the following:
+`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to
+support legacy users, but new users are strongly encouraged to avoid using
+`SecureJoin` and instead use the [new API](#new-api) or switch to
+[libpathrs][libpathrs].
+
+With the above limitations in mind, this library guarantees the following:
* If no error is set, the resulting string **must** be a child path of
- `SecureJoin` and will not contain any symlink path components (they will all
- be expanded).
+ `root` and will not contain any symlink path components (they will all be
+ expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
@@ -25,7 +32,7 @@ This library **guarantees** the following:
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
-* Non-existant path components are unaffected by `SecureJoin` (similar to
+* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
@@ -34,7 +41,7 @@ This library **guarantees** the following:
A (trivial) implementation of this function on GNU/Linux systems could be done
with the following (note that this requires root privileges and is far more
opaque than the implementation in this library, and also requires that
-`readlink` is inside the `root` path):
+`readlink` is inside the `root` path and is trustworthy):
```go
package securejoin
@@ -57,8 +64,105 @@ func SecureJoin(root, unsafePath string) (string, error) {
}
```
+[libpathrs]: https://github.com/openSUSE/libpathrs
[go#20126]: https://github.com/golang/go/issues/20126
+### New API ###
+
+While we recommend users switch to [libpathrs][libpathrs] as soon as it has a
+stable release, some methods implemented by libpathrs have been ported to this
+library to ease the transition. These APIs are only supported on Linux.
+
+These APIs are implemented such that `filepath-securejoin` will
+opportunistically use certain newer kernel APIs that make these operations far
+more secure. In particular:
+
+* All of the lookup operations will use [`openat2`][openat2.2] on new enough
+ kernels (Linux 5.6 or later) to restrict lookups through magic-links and
+ bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to
+ efficiently resolve symlinks within a rootfs.
+
+* The APIs provide hardening against a malicious `/proc` mount to either detect
+ or avoid being tricked by a `/proc` that is not legitimate. This is done
+ using [`openat2`][openat2.2] for all users, and privileged users will also be
+ further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2]
+ (Linux 5.2 or later).
+
+[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html
+[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md
+[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md
+
+#### `OpenInRoot` ####
+
+```go
+func OpenInRoot(root, unsafePath string) (*os.File, error)
+func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error)
+func Reopen(handle *os.File, flags int) (*os.File, error)
+```
+
+`OpenInRoot` is a much safer version of
+
+```go
+path, err := securejoin.SecureJoin(root, unsafePath)
+file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
+```
+
+that protects against various race attacks that could lead to serious security
+issues, depending on the application. Note that the returned `*os.File` is an
+`O_PATH` file descriptor, which is quite restricted. Callers will probably need
+to use `Reopen` to get a more usable handle (this split is done to provide
+useful features like PTY spawning and to avoid users accidentally opening bad
+inodes that could cause a DoS).
+
+Callers need to be careful in how they use the returned `*os.File`. Usually it
+is only safe to operate on the handle directly, and it is very easy to create a
+security issue. [libpathrs][libpathrs] provides far more helpers to make using
+these handles safer -- there is currently no plan to port them to
+`filepath-securejoin`.
+
+`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an
+`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or
+`MkdirAllHandle`) calls are operating on the same rootfs.
+
+> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits
+> a dangling symlink or non-existent path. This is in contrast to `SecureJoin`
+> which treated non-existent components as though they were real directories,
+> and would allow for partial resolution of dangling symlinks. These behaviours
+> are at odds with how Linux treats non-existent paths and dangling symlinks,
+> and so these are no longer allowed.
+
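+As a rough usage sketch (illustrative; not taken from the upstream docs),
+opening a file inside a rootfs and then upgrading the restricted `O_PATH`
+handle with `Reopen` might look like this:
+
+```go
+handle, err := securejoin.OpenInRoot("/path/to/rootfs", "/etc/passwd")
+if err != nil {
+	return err
+}
+defer handle.Close()
+
+// O_PATH handles cannot be read from directly; re-open with real flags.
+file, err := securejoin.Reopen(handle, unix.O_RDONLY)
+if err != nil {
+	return err
+}
+defer file.Close()
+```
+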
+#### `MkdirAll` ####
+
+```go
+func MkdirAll(root, unsafePath string, mode int) error
+func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error)
+```
+
+`MkdirAll` is a much safer version of
+
+```go
+path, err := securejoin.SecureJoin(root, unsafePath)
+err = os.MkdirAll(path, mode)
+```
+
+that protects against the same kinds of races that `OpenInRoot` protects
+against.
+
+`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an
+`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an
+`*os.File` of the final created directory is returned (this directory is
+guaranteed to be effectively identical to the directory created by
+`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot`
+after `MkdirAll`).
+
+> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits
+> a dangling symlink or non-existent path. This is in contrast to `SecureJoin`
+> which treated non-existent components as though they were real directories,
+> and would allow for partial resolution of dangling symlinks. These behaviours
+> are at odds with how Linux treats non-existent paths and dangling symlinks,
+> and so these are no longer allowed. This means that `MkdirAll` will not
+> create non-existent directories referenced by a dangling symlink.
+
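+As a rough sketch (illustrative; not taken from the upstream docs), creating a
+directory tree with `MkdirAllHandle` and performing later lookups against the
+same root handle might look like this:
+
+```go
+rootDir, err := os.OpenFile("/path/to/rootfs", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+if err != nil {
+	return err
+}
+defer rootDir.Close()
+
+// Create the tree and get an O_PATH handle to the final directory.
+dir, err := securejoin.MkdirAllHandle(rootDir, "var/lib/foo", 0o755)
+if err != nil {
+	return err
+}
+defer dir.Close()
+
+// Reusing rootDir guarantees both operations saw the same rootfs.
+handle, err := securejoin.OpenatInRoot(rootDir, "var/lib/foo")
+```
+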
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
index ee1372d33..42045acae 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -1 +1 @@
-0.2.2
+0.3.4
diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go
new file mode 100644
index 000000000..1ec7d065e
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go
@@ -0,0 +1,39 @@
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package securejoin implements a set of helpers to make it easier to write Go
+// code that is safe against symlink-related escape attacks. The primary idea
+// is to let you resolve a path within a rootfs directory as if the rootfs was
+// a chroot.
+//
+// securejoin has two APIs, a "legacy" API and a "modern" API.
+//
+// The legacy API is [SecureJoin] and [SecureJoinVFS]. These methods are
+// **not** safe against race conditions where an attacker changes the
+// filesystem after (or during) the [SecureJoin] operation.
+//
+// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived
+// functions). These are safe against racing attackers and have several other
+// protections that are not provided by the legacy API. There are many more
+// operations that most programs expect to be able to do safely, but we do not
+// provide explicit support for them because we want to encourage users to
+// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a
+// cross-language next-generation library that is entirely designed around
+// operating on paths safely.
+//
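+// As a rough sketch of the modern API (illustrative; error handling elided
+// and paths are placeholders):
+//
+//	err := securejoin.MkdirAll("/rootfs", "var/lib/foo", 0o755)
+//	handle, err := securejoin.OpenInRoot("/rootfs", "var/lib/foo")
+//	file, err := securejoin.Reopen(handle, unix.O_RDONLY)
+//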
+// securejoin has been used by several container runtimes (Docker, runc,
+// Kubernetes, etc) for quite a few years as a de-facto standard for operating
+// on container filesystem paths "safely". However, most users still use the
+// legacy API which is unsafe against various attacks (there is a fairly long
+// history of CVEs in dependent projects as a result). Users should switch to the modern
+// API as soon as possible (or even better, switch to libpathrs).
+//
+// This project was initially intended to be included in the Go standard
+// library, but [it was rejected](https://go.dev/issue/20126). There is now a
+// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API
+// that shares some of the goals of filepath-securejoin. However, that design
+// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the
+// use case of container runtimes and most system tools.
+package securejoin
diff --git a/vendor/github.com/cyphar/filepath-securejoin/go.mod b/vendor/github.com/cyphar/filepath-securejoin/go.mod
new file mode 100644
index 000000000..819b13eed
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/go.mod
@@ -0,0 +1,14 @@
+module github.com/cyphar/filepath-securejoin
+
+go 1.21
+
+require (
+ github.com/stretchr/testify v1.9.0
+ golang.org/x/sys v0.21.0
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/vendor/github.com/cyphar/filepath-securejoin/go.sum b/vendor/github.com/cyphar/filepath-securejoin/go.sum
new file mode 100644
index 000000000..90589653c
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/go.sum
@@ -0,0 +1,12 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
index c4ca3d713..e0ee3f2b5 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -1,134 +1,125 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
-// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package securejoin is an implementation of the hopefully-soon-to-be-included
-// SecureJoin helper that is meant to be part of the "path/filepath" package.
-// The purpose of this project is to provide a PoC implementation to make the
-// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
-// tangible.
package securejoin
import (
- "bytes"
+ "errors"
"os"
"path/filepath"
"strings"
"syscall"
-
- "github.com/pkg/errors"
)
-// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
-// evaluated in attempting to securely join the two given paths.
-var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
+const maxSymlinkLimit = 255
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
-// effectively a more broad version of os.IsNotExist.
+// effectively a broader version of [os.IsNotExist].
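+//
+// For example (an illustrative sketch; somePath is a placeholder):
+//
+//	if _, err := os.Lstat(somePath); err != nil && securejoin.IsNotExist(err) {
+//		// Treat the component as missing rather than as a hard failure.
+//	}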
func IsNotExist(err error) bool {
- // If it's a bone-fide ENOENT just bail.
- if os.IsNotExist(errors.Cause(err)) {
- return true
- }
-
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
- var errno error
- switch err := errors.Cause(err).(type) {
- case *os.PathError:
- errno = err.Err
- case *os.LinkError:
- errno = err.Err
- case *os.SyscallError:
- errno = err.Err
- }
- return errno == syscall.ENOTDIR || errno == syscall.ENOENT
+ return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
-// SecureJoinVFS joins the two given path components (similar to Join) except
+// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
// given root treated as the root of the filesystem, similar to a chroot. The
-// filesystem state is evaluated through the given VFS interface (if nil, the
-// standard os.* family of functions are used).
+// filesystem state is evaluated through the given [VFS] interface (if nil, the
+// standard [os].* family of functions are used).
//
// Note that the guarantees provided by this function only apply if the path
// components in the returned string are not modified (in other words are not
// replaced with symlinks on the filesystem) after this function has returned.
-// Such a symlink race is necessarily out-of-scope of SecureJoin.
+// Such a symlink race is necessarily out-of-scope of SecureJoinVFS.
+//
+// NOTE: Due to the above limitation, Linux users are strongly encouraged to
+// use [OpenInRoot] instead, which does safely protect against these kinds of
+// attacks. There is no way to solve this problem with SecureJoinVFS because
+// the API is fundamentally wrong (you cannot return a "safe" path string and
+// guarantee it won't be modified afterwards).
+//
+// Volume names in unsafePath are always discarded, regardless of whether they
+// are provided directly or encountered when evaluating symlinks. Therefore:
+//
+// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
}
- var path bytes.Buffer
- n := 0
- for unsafePath != "" {
- if n > 255 {
- return "", ErrSymlinkLoop
+ unsafePath = filepath.FromSlash(unsafePath)
+ var (
+ currentPath string
+ remainingPath = unsafePath
+ linksWalked int
+ )
+ for remainingPath != "" {
+ if v := filepath.VolumeName(remainingPath); v != "" {
+ remainingPath = remainingPath[len(v):]
}
- // Next path component, p.
- i := strings.IndexRune(unsafePath, filepath.Separator)
- var p string
- if i == -1 {
- p, unsafePath = unsafePath, ""
+ // Get the next path component.
+ var part string
+ if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 {
+ part, remainingPath = remainingPath, ""
} else {
- p, unsafePath = unsafePath[:i], unsafePath[i+1:]
+ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
}
- // Create a cleaned path, using the lexical semantics of /../a, to
- // create a "scoped" path component which can safely be joined to fullP
- // for evaluation. At this point, path.String() doesn't contain any
- // symlink components.
- cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
- if cleanP == string(filepath.Separator) {
- path.Reset()
+ // Apply the component lexically to the path we are building.
+ // currentPath does not contain any symlinks, and we are lexically
+ // dealing with a single component, so it's okay to do a filepath.Clean
+ // here.
+ nextPath := filepath.Join(string(filepath.Separator), currentPath, part)
+ if nextPath == string(filepath.Separator) {
+ currentPath = ""
continue
}
- fullP := filepath.Clean(root + cleanP)
+ fullPath := root + string(filepath.Separator) + nextPath
// Figure out whether the path is a symlink.
- fi, err := vfs.Lstat(fullP)
+ fi, err := vfs.Lstat(fullPath)
if err != nil && !IsNotExist(err) {
return "", err
}
// Treat non-existent path components the same as non-symlinks (we
// can't do any better here).
if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
- path.WriteString(p)
- path.WriteRune(filepath.Separator)
+ currentPath = nextPath
continue
}
- // Only increment when we actually dereference a link.
- n++
+ // It's a symlink, so get its contents and expand it by prepending it
+ // to the yet-unparsed path.
+ linksWalked++
+ if linksWalked > maxSymlinkLimit {
+ return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
+ }
- // It's a symlink, expand it by prepending it to the yet-unparsed path.
- dest, err := vfs.Readlink(fullP)
+ dest, err := vfs.Readlink(fullPath)
if err != nil {
return "", err
}
+ remainingPath = dest + string(filepath.Separator) + remainingPath
// Absolute symlinks reset any work we've already done.
if filepath.IsAbs(dest) {
- path.Reset()
+ currentPath = ""
}
- unsafePath = dest + string(filepath.Separator) + unsafePath
}
- // We have to clean path.String() here because it may contain '..'
- // components that are entirely lexical, but would be misleading otherwise.
- // And finally do a final clean to ensure that root is also lexically
- // clean.
- fullP := filepath.Clean(string(filepath.Separator) + path.String())
- return filepath.Clean(root + fullP), nil
+ // There should be no lexical components like ".." left in the path here,
+ // but for safety clean up the path before joining it to the root.
+ finalPath := filepath.Join(string(filepath.Separator), currentPath)
+ return filepath.Join(root, finalPath), nil
}
-// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
-// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
+// SecureJoin is a wrapper around [SecureJoinVFS] that just uses the [os].* library
+// of functions as the [VFS]. If in doubt, use this function over [SecureJoinVFS].
func SecureJoin(root, unsafePath string) (string, error) {
return SecureJoinVFS(root, unsafePath, nil)
}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go
new file mode 100644
index 000000000..290befa15
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go
@@ -0,0 +1,389 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+type symlinkStackEntry struct {
+ // (dir, remainingPath) is what we would've returned if the link didn't
+ // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in
+ // this case.
+ dir *os.File
+ remainingPath string
+ // linkUnwalked is the remaining path components from the original
+ // Readlink which we have yet to walk. When this slice is empty, we
+ // drop the link from the stack.
+ linkUnwalked []string
+}
+
+func (se symlinkStackEntry) String() string {
+ return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/"))
+}
+
+func (se symlinkStackEntry) Close() {
+ _ = se.dir.Close()
+}
+
+type symlinkStack []*symlinkStackEntry
+
+func (s *symlinkStack) IsEmpty() bool {
+ return s == nil || len(*s) == 0
+}
+
+func (s *symlinkStack) Close() {
+ if s != nil {
+ for _, link := range *s {
+ link.Close()
+ }
+ // TODO: Switch to clear once we switch to Go 1.21.
+ *s = nil
+ }
+}
+
+var (
+ errEmptyStack = errors.New("[internal] stack is empty")
+ errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack")
+)
+
+func (s *symlinkStack) popPart(part string) error {
+ if s == nil || s.IsEmpty() {
+ // If there is nothing in the symlink stack, then the part was from the
+ // real path provided by the user, and this is a no-op.
+ return errEmptyStack
+ }
+ if part == "." {
+ // "." components are no-ops -- we drop them when doing SwapLink.
+ return nil
+ }
+
+ tailEntry := (*s)[len(*s)-1]
+
+ // Double-check that we are popping the component we expect.
+ if len(tailEntry.linkUnwalked) == 0 {
+ return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry)
+ }
+ headPart := tailEntry.linkUnwalked[0]
+ if headPart != part {
+ return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart)
+ }
+
+ // Drop the component, but keep the entry around in case we are dealing
+ // with a "tail-chained" symlink.
+ tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:]
+ return nil
+}
+
+func (s *symlinkStack) PopPart(part string) error {
+ if err := s.popPart(part); err != nil {
+ if errors.Is(err, errEmptyStack) {
+ // Skip empty stacks.
+ err = nil
+ }
+ return err
+ }
+
+ // Clean up any of the trailing stack entries that are empty.
+ for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- {
+ entry := (*s)[lastGood]
+ if len(entry.linkUnwalked) > 0 {
+ break
+ }
+ entry.Close()
+ (*s) = (*s)[:lastGood]
+ }
+ return nil
+}
+
+func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error {
+ if s == nil {
+ return nil
+ }
+ // Split the link target and clean up any "" parts.
+ linkTargetParts := slices.DeleteFunc(
+ strings.Split(linkTarget, "/"),
+ func(part string) bool { return part == "" || part == "." })
+
+ // Copy the directory so the caller doesn't close our copy.
+ dirCopy, err := dupFile(dir)
+ if err != nil {
+ return err
+ }
+
+ // Add to the stack.
+ *s = append(*s, &symlinkStackEntry{
+ dir: dirCopy,
+ remainingPath: remainingPath,
+ linkUnwalked: linkTargetParts,
+ })
+ return nil
+}
+
+func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error {
+ // If we are currently inside a symlink resolution, remove the symlink
+ // component from the last symlink entry, but don't remove the entry even
+ // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we
+ // hit during a symlink resolution) we need to keep the old symlink until
+ // we finish the resolution.
+ if err := s.popPart(linkPart); err != nil {
+ if !errors.Is(err, errEmptyStack) {
+ return err
+ }
+ // Push the component regardless of whether the stack was empty.
+ }
+ return s.push(dir, remainingPath, linkTarget)
+}
+
+func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) {
+ if s == nil || s.IsEmpty() {
+ return nil, "", false
+ }
+ tailEntry := (*s)[0]
+ *s = (*s)[1:]
+ return tailEntry.dir, tailEntry.remainingPath, true
+}
+
+// partialLookupInRoot tries to lookup as much of the request path as possible
+// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing
+// component of the requested path, returning a file handle to the final
+// existing component and a string containing the remaining path components.
+func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) {
+ return lookupInRoot(root, unsafePath, true)
+}
+
+func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
+ handle, remainingPath, err := lookupInRoot(root, unsafePath, false)
+ if remainingPath != "" && err == nil {
+ // should never happen
+ err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath)
+ }
+ // lookupInRoot(partial=false) will always close the handle if an error is
+ // returned, so no need to double-check here.
+ return handle, err
+}
+
+func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
+ unsafePath = filepath.ToSlash(unsafePath) // noop
+
+ // This is very similar to SecureJoin, except that we operate on the
+ // components using file descriptors. We then return the last component we
+ // managed open, along with the remaining path components not opened.
+
+ // Try to use openat2 if possible.
+ if hasOpenat2() {
+ return lookupOpenat2(root, unsafePath, partial)
+ }
+
+ // Get the "actual" root path from /proc/self/fd. This is necessary if the
+ // root is some magic-link like /proc/$pid/root, in which case we want to
+ // make sure when we do checkProcSelfFdPath that we are using the correct
+ // root path.
+ logicalRootPath, err := procSelfFdReadlink(root)
+ if err != nil {
+ return nil, "", fmt.Errorf("get real root path: %w", err)
+ }
+
+ currentDir, err := dupFile(root)
+ if err != nil {
+ return nil, "", fmt.Errorf("clone root fd: %w", err)
+ }
+ defer func() {
+ // If a handle is not returned, close the internal handle.
+ if Handle == nil {
+ _ = currentDir.Close()
+ }
+ }()
+
+ // symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats
+ // dangling symlinks. If we hit a non-existent path while resolving a
+ // symlink, we need to return the (dir, remainingPath) that we had when we
+ // hit the symlink (treating the symlink as though it were a regular file).
+ // The set of (dir, remainingPath) sets is stored within the symlinkStack
+ // and we add and remove parts when we hit symlink and non-symlink
+ // components respectively. We need a stack because of recursive symlinks
+ // (symlinks that contain symlink components in their target).
+ //
+ // Note that the stack is ONLY used for book-keeping. All of the actual
+ // path walking logic is still based on currentPath/remainingPath and
+ // currentDir (as in SecureJoin).
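+ //
+ // An illustrative walk-through (not from the original comments): when
+ // resolving "link/tail" where "link" is a symlink to "missing/dir",
+ // SwapLink records (dir, "link/tail", ["missing", "dir"]) on the stack.
+ // If opening "missing" then fails with ENOENT, PopTopSymlink hands back
+ // the saved (dir, "link/tail") pair -- exactly what
+ // openat2(RESOLVE_IN_ROOT) would return for the dangling symlink.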
+ var symStack *symlinkStack
+ if partial {
+ symStack = new(symlinkStack)
+ defer symStack.Close()
+ }
+
+ var (
+ linksWalked int
+ currentPath string
+ remainingPath = unsafePath
+ )
+ for remainingPath != "" {
+ // Save the current remaining path so if the part is not real we can
+ // return the path including the component.
+ oldRemainingPath := remainingPath
+
+ // Get the next path component.
+ var part string
+ if i := strings.IndexByte(remainingPath, '/'); i == -1 {
+ part, remainingPath = remainingPath, ""
+ } else {
+ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
+ }
+ // If we hit an empty component, we need to treat it as though it is
+ // "." so that trailing "/" and "//" components on a non-directory
+ // correctly return the right error code.
+ if part == "" {
+ part = "."
+ }
+
+ // Apply the component lexically to the path we are building.
+ // currentPath does not contain any symlinks, and we are lexically
+ // dealing with a single component, so it's okay to do a filepath.Clean
+ // here.
+ nextPath := path.Join("/", currentPath, part)
+ // If we logically hit the root, just clone the root rather than
+ // opening the part and doing all of the other checks.
+ if nextPath == "/" {
+ if err := symStack.PopPart(part); err != nil {
+ return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err)
+ }
+ // Jump to root.
+ rootClone, err := dupFile(root)
+ if err != nil {
+ return nil, "", fmt.Errorf("clone root fd: %w", err)
+ }
+ _ = currentDir.Close()
+ currentDir = rootClone
+ currentPath = nextPath
+ continue
+ }
+
+ // Try to open the next component.
+ nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ switch {
+ case err == nil:
+ st, err := nextDir.Stat()
+ if err != nil {
+ _ = nextDir.Close()
+ return nil, "", fmt.Errorf("stat component %q: %w", part, err)
+ }
+
+ switch st.Mode() & os.ModeType {
+ case os.ModeSymlink:
+ // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
+ // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
+ // fstatat() with empty relative pathnames").
+ linkDest, err := readlinkatFile(nextDir, "")
+ // We don't need the handle anymore.
+ _ = nextDir.Close()
+ if err != nil {
+ return nil, "", err
+ }
+
+ linksWalked++
+ if linksWalked > maxSymlinkLimit {
+ return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP}
+ }
+
+ // Swap out the symlink's component for the link entry itself.
+ if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil {
+ return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err)
+ }
+
+ // Update our logical remaining path.
+ remainingPath = linkDest + "/" + remainingPath
+ // Absolute symlinks reset any work we've already done.
+ if path.IsAbs(linkDest) {
+ // Jump to root.
+ rootClone, err := dupFile(root)
+ if err != nil {
+ return nil, "", fmt.Errorf("clone root fd: %w", err)
+ }
+ _ = currentDir.Close()
+ currentDir = rootClone
+ currentPath = "/"
+ }
+
+ default:
+ // If we are dealing with a directory, simply walk into it.
+ _ = currentDir.Close()
+ currentDir = nextDir
+ currentPath = nextPath
+
+ // The part was real, so drop it from the symlink stack.
+ if err := symStack.PopPart(part); err != nil {
+ return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err)
+ }
+
+ // If we are operating on a .., make sure we haven't escaped.
+ // We only have to check for ".." here because walking down
+ // into a regular component cannot cause you to
+ // escape. This mirrors the logic in RESOLVE_IN_ROOT, except we
+ // have to check every ".." rather than only checking after a
+ // rename or mount on the system.
+ if part == ".." {
+ // Make sure the root hasn't moved.
+ if err := checkProcSelfFdPath(logicalRootPath, root); err != nil {
+ return nil, "", fmt.Errorf("root path moved during lookup: %w", err)
+ }
+ // Make sure the path is what we expect.
+ fullPath := logicalRootPath + nextPath
+ if err := checkProcSelfFdPath(fullPath, currentDir); err != nil {
+ return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err)
+ }
+ }
+ }
+
+ default:
+ if !partial {
+ return nil, "", err
+ }
+ // If there are any remaining components in the symlink stack, we
+ // are still within a symlink resolution and thus we hit a dangling
+ // symlink. So pretend that the first symlink in the stack we hit
+ // was an ENOENT (to match openat2).
+ if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok {
+ _ = currentDir.Close()
+ return oldDir, remainingPath, err
+ }
+ // We have hit a final component that doesn't exist, so we have our
+ // partial open result. Note that we have to use the OLD remaining
+ // path, since the lookup failed.
+ return currentDir, oldRemainingPath, err
+ }
+ }
+
+ // If the unsafePath had a trailing slash, we need to make sure we try to
+ // do a relative "." open so that we will correctly return an error when
+ // the final component is a non-directory (to match openat2). In the
+ // context of openat2, a trailing slash and a trailing "/." are completely
+ // equivalent.
+ if strings.HasSuffix(unsafePath, "/") {
+ nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ if err != nil {
+ if !partial {
+ _ = currentDir.Close()
+ currentDir = nil
+ }
+ return currentDir, "", err
+ }
+ _ = currentDir.Close()
+ currentDir = nextDir
+ }
+
+ // All of the components existed!
+ return currentDir, "", nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
new file mode 100644
index 000000000..b5f674524
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
@@ -0,0 +1,207 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ errInvalidMode = errors.New("invalid permission mode")
+ errPossibleAttack = errors.New("possible attack detected")
+)
+
+// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use
+// in two respects:
+//
+// - The caller provides the root directory as an *[os.File] (preferably O_PATH)
+// handle. This means that the caller can be sure which root directory is
+// being used. Note that this can be emulated by using /proc/self/fd/... as
+// the root path with [os.MkdirAll].
+//
+// - Once all of the directories have been created, an *[os.File] O_PATH handle
+// to the directory at unsafePath is returned to the caller. This is done in
+// an effectively-race-free way (an attacker would only be able to swap the
+// final directory component), which is not possible to emulate with
+// [MkdirAll].
+//
+// In addition, the returned handle is obtained far more efficiently than doing
+// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
+// doing [MkdirAll]. If you intend to open the directory after creating it, you
+// should use MkdirAllHandle.
+func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) {
+ // Make sure there are no os.FileMode bits set.
+ if mode&^0o7777 != 0 {
+ return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode)
+ }
+ // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid
+ // bits. We could also silently ignore them but since we have very few
+ // users it seems more prudent to return an error so users notice that
+ // these bits will not be set.
+ if mode&^0o1777 != 0 {
+ return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
+ }
+
+ // Try to open as much of the path as possible.
+ currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath)
+ defer func() {
+ if Err != nil {
+ _ = currentDir.Close()
+ }
+ }()
+ if err != nil && !errors.Is(err, unix.ENOENT) {
+ return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err)
+ }
+
+ // If there is an attacker deleting directories as we walk into them,
+ // detect this proactively. Note this is guaranteed to detect if the
+ // attacker deleted any part of the tree up to currentDir.
+ //
+ // Once we walk into a dead directory, partialLookupInRoot would not be
+ // able to walk further down the tree (directories must be empty before
+ // they are deleted), and if the attacker has removed the entire tree we
+ // can be sure that anything that was originally inside a dead directory
+ // must also be deleted and thus is a dead directory in its own right.
+ //
+ // This is mostly a quality-of-life check, because mkdir will simply fail
+ // later if the attacker deletes the tree after this check.
+ if err := isDeadInode(currentDir); err != nil {
+ return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err)
+ }
+
+ // Re-open the path to match the O_DIRECTORY reopen loop later (so that we
+ // always return a non-O_PATH handle). We also check that we actually got a
+ // directory.
+ if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) {
+ return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR)
+ } else if err != nil {
+ return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err)
+ } else {
+ _ = currentDir.Close()
+ currentDir = reopenDir
+ }
+
+ remainingParts := strings.Split(remainingPath, string(filepath.Separator))
+ if slices.Contains(remainingParts, "..") {
+ // The path contained ".." components after the end of the "real"
+ // components. We could try to safely resolve ".." here but that would
+ // add a bunch of extra logic for something that it's not clear even
+ // needs to be supported. So just return an error.
+ //
+ // If we do filepath.Clean(remainingPath) then we end up with the
+ // problem that ".." can erase a trailing dangling symlink and produce
+ // a path that doesn't quite match what the user asked for.
+ return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath)
+ }
+
+ // Make sure the mode doesn't have any type bits.
+ mode &^= unix.S_IFMT
+
+ // Create the remaining components.
+ for _, part := range remainingParts {
+ switch part {
+ case "", ".":
+ // Skip over no-op paths.
+ continue
+ }
+
+ // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely
+ // create the final component without worrying about symlink-exchange
+ // attacks.
+ if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil {
+ err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
+ // Make the error a bit nicer if the directory is dead.
+ if err2 := isDeadInode(currentDir); err2 != nil {
+ err = fmt.Errorf("%w (%w)", err, err2)
+ }
+ return nil, err
+ }
+
+ // Get a handle to the next component. O_DIRECTORY means we don't need
+ // to use O_PATH.
+ var nextDir *os.File
+ if hasOpenat2() {
+ nextDir, err = openat2File(currentDir, part, &unix.OpenHow{
+ Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV,
+ })
+ } else {
+ nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ }
+ if err != nil {
+ return nil, err
+ }
+ _ = currentDir.Close()
+ currentDir = nextDir
+
+ // It's possible that the directory we just opened was swapped by an
+ // attacker. Unfortunately there isn't much we can do to protect
+ // against this, and MkdirAll's behaviour is that we will reuse
+ // existing directories anyway so the need to protect against this is
+ // incredibly limited (and arguably doesn't even deserve mention here).
+ //
+ // Ideally we might want to check that the owner and mode match what we
+ // would've created -- unfortunately, it is non-trivial to verify that
+ // the owner and mode of the created directory match. While plain Unix
+ // DAC rules seem simple enough to emulate, there are a bunch of other
+ // factors that can change the mode or owner of created directories
+ // (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on
+ // filesystems like vfat, etc etc). We used to try to verify this but
+ // it just led to a series of spurious errors.
+ //
+ // We could also check that the directory is non-empty, but
+ // unfortunately some pseudofilesystems (like cgroupfs) create
+ // non-empty directories, which would result in different spurious
+ // errors.
+ }
+ return currentDir, nil
+}
+
+// MkdirAll is a race-safe alternative to the [os.MkdirAll] function,
+// where the new directory is guaranteed to be within the root directory (if an
+// attacker can move directories from inside the root to outside the root, the
+// created directory tree might be outside of the root but the key constraint
+// is that at no point will we walk outside of the directory tree we are
+// creating).
+//
+// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to
+//
+// path, _ := securejoin.SecureJoin(root, unsafePath)
+// err := os.MkdirAll(path, mode)
+//
+// But is much safer. The above implementation is unsafe because if an attacker
+// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is
+// possible for MkdirAll to resolve unsafe symlink components and create
+// directories outside of the root.
+//
+// If you plan to open the directory after you have created it or want to use
+// an open directory handle as the root, you should use [MkdirAllHandle] instead.
+// This function is a wrapper around [MkdirAllHandle].
+//
+// NOTE: The mode argument must be set to the unix mode bits (unix.S_I...), not
+// the Go generic mode bits ([os.FileMode]...).
+func MkdirAll(root, unsafePath string, mode int) error {
+ rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+ defer rootDir.Close()
+
+ f, err := MkdirAllHandle(rootDir, unsafePath, mode)
+ if err != nil {
+ return err
+ }
+ _ = f.Close()
+ return nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
new file mode 100644
index 000000000..230be73f0
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
@@ -0,0 +1,103 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided
+// using an *[os.File] handle, to ensure that the correct root directory is used.
+func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
+ handle, err := completeLookupInRoot(root, unsafePath)
+ if err != nil {
+ return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err}
+ }
+ return handle, nil
+}
+
+// OpenInRoot safely opens the provided unsafePath within the root.
+// Effectively, OpenInRoot(root, unsafePath) is equivalent to
+//
+// path, _ := securejoin.SecureJoin(root, unsafePath)
+// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
+//
+// But is much safer. The above implementation is unsafe because if an attacker
+// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is
+// possible for the returned file to be outside of the root.
+//
+// Note that the returned handle is an O_PATH handle, meaning that only a very
+// limited set of operations will work on the handle. This is done to avoid
+// accidentally opening an untrusted file that could cause issues (such as a
+// disconnected TTY that could cause a DoS, or some other issue). In order to
+// use the returned handle, you can "upgrade" it to a proper handle using
+// [Reopen].
+func OpenInRoot(root, unsafePath string) (*os.File, error) {
+ rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer rootDir.Close()
+ return OpenatInRoot(rootDir, unsafePath)
+}
+
+// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd.
+// Reopen(file, flags) is effectively equivalent to
+//
+// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd())
+// os.OpenFile(fdPath, flags|unix.O_CLOEXEC)
+//
+// But with some extra hardenings to ensure that we are not tricked by a
+// maliciously-configured /proc mount. While this attack scenario is not
+// common, in container runtimes it is possible for higher-level runtimes to be
+// tricked into configuring an unsafe /proc that can be used to attack file
+// operations. See [CVE-2019-19921] for more details.
+//
+// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw
+func Reopen(handle *os.File, flags int) (*os.File, error) {
+ procRoot, err := getProcRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ // We can't operate on /proc/thread-self/fd/$n directly when doing a
+ // re-open, so we need to open /proc/thread-self/fd and then open a single
+ // final component.
+ procFdDir, closer, err := procThreadSelf(procRoot, "fd/")
+ if err != nil {
+ return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err)
+ }
+ defer procFdDir.Close()
+ defer closer()
+
+ // Try to detect if there is a mount on top of the magic-link we are about
+ // to open. If we are using unsafeHostProcRoot(), this could change after
+ // we check it (and there's nothing we can do about that) but for
+ // privateProcRoot() this should be guaranteed to be safe (at least since
+ // Linux 5.12[1], when anonymous mount namespaces were completely isolated
+ // from external mounts including mount propagation events).
+ //
+ // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+ // onto targets that reside on shared mounts").
+ fdStr := strconv.Itoa(int(handle.Fd()))
+ if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil {
+ return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err)
+ }
+
+ flags |= unix.O_CLOEXEC
+ // Rather than just wrapping openatFile, open-code it so we can copy
+ // handle.Name().
+ reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0)
+ if err != nil {
+ return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err)
+ }
+ return os.NewFile(uintptr(reopenFd), handle.Name()), nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
new file mode 100644
index 000000000..ae3b381ef
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
@@ -0,0 +1,128 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "golang.org/x/sys/unix"
+)
+
+var hasOpenat2 = sync.OnceValue(func() bool {
+ fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
+ Flags: unix.O_PATH | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
+ })
+ if err != nil {
+ return false
+ }
+ _ = unix.Close(fd)
+ return true
+})
+
+func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool {
+ // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve
+ // ".." while a mount or rename occurs anywhere on the system. This could
+ // happen spuriously, or as the result of an attacker trying to mess with
+ // us during lookup.
+ //
+ // In addition, scoped lookups have a "safety check" at the end of
+ // complete_walk which will return -EXDEV if the final path is not in the
+ // root.
+ return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 &&
+ (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV))
+}
+
+const scopedLookupMaxRetries = 10
+
+func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) {
+ fullPath := dir.Name() + "/" + path
+ // Make sure we always set O_CLOEXEC.
+ how.Flags |= unix.O_CLOEXEC
+ var tries int
+ for tries < scopedLookupMaxRetries {
+ fd, err := unix.Openat2(int(dir.Fd()), path, how)
+ if err != nil {
+ if scopedLookupShouldRetry(how, err) {
+ // We retry a couple of times to avoid the spurious errors, and
+ // if we are being attacked then returning -EAGAIN is the best
+ // we can do.
+ tries++
+ continue
+ }
+ return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err}
+ }
+ // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong.
+ // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise
+ // you'll get infinite recursion here.
+ if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT {
+ if actualPath, err := rawProcSelfFdReadlink(fd); err == nil {
+ fullPath = actualPath
+ }
+ }
+ return os.NewFile(uintptr(fd), fullPath), nil
+ }
+ return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack}
+}
+
+func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) {
+ if !partial {
+ file, err := openat2File(root, unsafePath, &unix.OpenHow{
+ Flags: unix.O_PATH | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
+ })
+ return file, "", err
+ }
+ return partialLookupOpenat2(root, unsafePath)
+}
+
+// partialLookupOpenat2 is an alternative implementation of
+// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a
+// handle to the deepest existing child of the requested path within the root.
+func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) {
+ // TODO: Implement this as a git-bisect-like binary search.
+
+ unsafePath = filepath.ToSlash(unsafePath) // noop
+ endIdx := len(unsafePath)
+ var lastError error
+ for endIdx > 0 {
+ subpath := unsafePath[:endIdx]
+
+ handle, err := openat2File(root, subpath, &unix.OpenHow{
+ Flags: unix.O_PATH | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
+ })
+ if err == nil {
+ // Jump over the slash if we have a non-"" remainingPath.
+ if endIdx < len(unsafePath) {
+ endIdx += 1
+ }
+ // We found a subpath!
+ return handle, unsafePath[endIdx:], lastError
+ }
+ if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) {
+ // That path doesn't exist, let's try the next directory up.
+ endIdx = strings.LastIndexByte(subpath, '/')
+ lastError = err
+ continue
+ }
+ return nil, "", fmt.Errorf("open subpath: %w", err)
+ }
+ // If we couldn't open anything, the whole subpath is missing. Return a
+ // copy of the root fd so that the caller doesn't close this one by
+ // accident.
+ rootClone, err := dupFile(root)
+ if err != nil {
+ return nil, "", err
+ }
+ return rootClone, unsafePath, lastError
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go
new file mode 100644
index 000000000..949fb5f2d
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go
@@ -0,0 +1,59 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/unix"
+)
+
+func dupFile(f *os.File) (*os.File, error) {
+ fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0)
+ if err != nil {
+ return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
+ }
+ return os.NewFile(uintptr(fd), f.Name()), nil
+}
+
+func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) {
+ // Make sure we always set O_CLOEXEC.
+ flags |= unix.O_CLOEXEC
+ fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode))
+ if err != nil {
+ return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err}
+ }
+ // All of the paths we use with openatFile(2) are guaranteed to be
+ // lexically safe, so we can use filepath.Join here.
+ fullPath := filepath.Join(dir.Name(), path)
+ return os.NewFile(uintptr(fd), fullPath), nil
+}
+
+func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) {
+ var stat unix.Stat_t
+ if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil {
+ return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err}
+ }
+ return stat, nil
+}
+
+func readlinkatFile(dir *os.File, path string) (string, error) {
+ size := 4096
+ for {
+ linkBuf := make([]byte, size)
+ n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf)
+ if err != nil {
+ return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err}
+ }
+ if n != size {
+ return string(linkBuf[:n]), nil
+ }
+ // Possible truncation, resize the buffer.
+ size *= 2
+ }
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
new file mode 100644
index 000000000..8cc827d70
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
@@ -0,0 +1,440 @@
+//go:build linux
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+
+ "golang.org/x/sys/unix"
+)
+
+func fstat(f *os.File) (unix.Stat_t, error) {
+ var stat unix.Stat_t
+ if err := unix.Fstat(int(f.Fd()), &stat); err != nil {
+ return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err}
+ }
+ return stat, nil
+}
+
+func fstatfs(f *os.File) (unix.Statfs_t, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil {
+ return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err}
+ }
+ return statfs, nil
+}
+
+// The kernel guarantees that the root inode of a procfs mount has an
+// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
+const (
+ procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
+ procRootIno = 1 // PROC_ROOT_INO
+)
+
+func verifyProcRoot(procRoot *os.File) error {
+ if statfs, err := fstatfs(procRoot); err != nil {
+ return err
+ } else if statfs.Type != procSuperMagic {
+ return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+ }
+ if stat, err := fstat(procRoot); err != nil {
+ return err
+ } else if stat.Ino != procRootIno {
+ return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
+ }
+ return nil
+}
+
+var hasNewMountApi = sync.OnceValue(func() bool {
+ // All of the pieces of the new mount API we use (fsopen, fsconfig,
+ // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
+ // just check for one of the syscalls and the others should also be
+ // available.
+ //
+ // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
+ // This is equivalent to openat(2), but tells us if open_tree is
+ // available (and thus all of the other basic new mount API syscalls).
+ // open_tree(2) is the most light-weight syscall to test here.
+ //
+ // [1]: merge commit 400913252d09
+ // [2]:
+ fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
+ if err != nil {
+ return false
+ }
+ _ = unix.Close(fd)
+ return true
+})
+
+func fsopen(fsName string, flags int) (*os.File, error) {
+ // Make sure we always set O_CLOEXEC.
+ flags |= unix.FSOPEN_CLOEXEC
+ fd, err := unix.Fsopen(fsName, flags)
+ if err != nil {
+ return nil, os.NewSyscallError("fsopen "+fsName, err)
+ }
+ return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil
+}
+
+func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) {
+ // Make sure we always set O_CLOEXEC.
+ flags |= unix.FSMOUNT_CLOEXEC
+ fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs)
+ if err != nil {
+ return nil, os.NewSyscallError("fsmount "+ctx.Name(), err)
+ }
+ return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil
+}
+
+func newPrivateProcMount() (*os.File, error) {
+ procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC)
+ if err != nil {
+ return nil, err
+ }
+ defer procfsCtx.Close()
+
+ // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors.
+ _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable")
+ _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid")
+
+ // Get an actual handle.
+ if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil {
+ return nil, os.NewSyscallError("fsconfig create procfs", err)
+ }
+ return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID)
+}
+
+func openTree(dir *os.File, path string, flags uint) (*os.File, error) {
+ dirFd := -int(unix.EBADF)
+ dirName := "."
+ if dir != nil {
+ dirFd = int(dir.Fd())
+ dirName = dir.Name()
+ }
+ // Make sure we always set O_CLOEXEC.
+ flags |= unix.OPEN_TREE_CLOEXEC
+ fd, err := unix.OpenTree(dirFd, path, flags)
+ if err != nil {
+ return nil, &os.PathError{Op: "open_tree", Path: path, Err: err}
+ }
+ return os.NewFile(uintptr(fd), dirName+"/"+path), nil
+}
+
+func clonePrivateProcMount() (_ *os.File, Err error) {
+ // Try to make a clone without using AT_RECURSIVE if we can. If this works,
+ // we can be sure there are no over-mounts and so if the root is valid then
+ // we're golden. Otherwise, we have to deal with over-mounts.
+ procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE)
+ if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) {
+ procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("creating a detached procfs clone: %w", err)
+ }
+ defer func() {
+ if Err != nil {
+ _ = procfsHandle.Close()
+ }
+ }()
+ if err := verifyProcRoot(procfsHandle); err != nil {
+ return nil, err
+ }
+ return procfsHandle, nil
+}
+
+func privateProcRoot() (*os.File, error) {
+ if !hasNewMountApi() || hookForceGetProcRootUnsafe() {
+ return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
+ }
+ // Try to create a new procfs mount from scratch if we can. This ensures we
+ // can get a procfs mount even if /proc is fake (for whatever reason).
+ procRoot, err := newPrivateProcMount()
+ if err != nil || hookForcePrivateProcRootOpenTree(procRoot) {
+ // Try to clone /proc then...
+ procRoot, err = clonePrivateProcMount()
+ }
+ return procRoot, err
+}
+
+func unsafeHostProcRoot() (_ *os.File, Err error) {
+ procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if Err != nil {
+ _ = procRoot.Close()
+ }
+ }()
+ if err := verifyProcRoot(procRoot); err != nil {
+ return nil, err
+ }
+ return procRoot, nil
+}
+
+func doGetProcRoot() (*os.File, error) {
+ procRoot, err := privateProcRoot()
+ if err != nil {
+ // Fall back to using a /proc handle if making a private mount failed.
+ // If we have openat2, at least we can avoid some kinds of over-mount
+ // attacks, but without openat2 there's not much we can do.
+ procRoot, err = unsafeHostProcRoot()
+ }
+ return procRoot, err
+}
+
+var getProcRoot = sync.OnceValues(func() (*os.File, error) {
+ return doGetProcRoot()
+})
+
+var hasProcThreadSelf = sync.OnceValue(func() bool {
+ return unix.Access("/proc/thread-self/", unix.F_OK) == nil
+})
+
+var errUnsafeProcfs = errors.New("unsafe procfs detected")
+
+type procThreadSelfCloser func()
+
+// procThreadSelf returns a handle to /proc/thread-self/ (or an
+// equivalent handle on older kernels where /proc/thread-self doesn't exist).
+// Once finished with the handle, you must call the returned closer function
+ // (runtime.UnlockOSThread). You must not pass the returned *os.File to
+ // other goroutines or use the handle after calling the closer.
+//
+// This is similar to ProcThreadSelf from runc, but with extra hardening
+// applied and using *os.File.
+func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
+ // We need to lock our thread until the caller is done with the handle
+ // because between getting the handle and using it we could get interrupted
+ // by the Go runtime and hit the case where the underlying thread is
+ // swapped out and the original thread is killed, resulting in
+ // pull-your-hair-out-hard-to-debug issues in the caller.
+ runtime.LockOSThread()
+ defer func() {
+ if Err != nil {
+ runtime.UnlockOSThread()
+ }
+ }()
+
+ // Figure out what prefix we want to use.
+ threadSelf := "thread-self/"
+ if !hasProcThreadSelf() || hookForceProcSelfTask() {
+ // Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
+ threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
+ if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
+ // In this case, we are running in a pid namespace that doesn't match
+ // the /proc mount we have. This can happen inside runc.
+ //
+ // Unfortunately, there is no nice way to get the correct TID to
+ // use here because of the age of the kernel, so we have to just
+ // use /proc/self and hope that it works.
+ threadSelf = "self/"
+ }
+ }
+
+ // Grab the handle.
+ var (
+ handle *os.File
+ err error
+ )
+ if hasOpenat2() {
+ // We prefer being able to use RESOLVE_NO_XDEV if we can, to be
+ // absolutely sure we are operating on a clean /proc handle that
+ // doesn't have any cheeky overmounts that could trick us (including
+ // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
+ // strictly needed, but just use it since we have it.
+ //
+ // NOTE: /proc/self is technically a magic-link (the contents of the
+ // symlink are generated dynamically), but it doesn't use
+ // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
+ //
+ // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses
+ // procSelfFdReadlink to clean up the returned f.Name() if we use
+ // RESOLVE_IN_ROOT (which would lead to an infinite recursion).
+ handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{
+ Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+ }
+ } else {
+ handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+ }
+ defer func() {
+ if Err != nil {
+ _ = handle.Close()
+ }
+ }()
+ // We can't detect bind-mounts of different parts of procfs on top of
+ // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we
+ // aren't on the wrong filesystem here.
+ if statfs, err := fstatfs(handle); err != nil {
+ return nil, nil, err
+ } else if statfs.Type != procSuperMagic {
+ return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+ }
+ }
+ return handle, runtime.UnlockOSThread, nil
+}
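
The calling convention deserves a sketch (again assuming in-package access to the unexported helper): the handle must be closed before the returned closer runs, because the closer unlocks the OS thread that the handle's validity depends on.

// Sketch: fstat the current thread's /proc/thread-self/status via a
// hardened O_PATH handle. Deferred calls run last-in first-out, so
// handle.Close() runs before closer() (runtime.UnlockOSThread).
func statThreadSelfStatus(procRoot *os.File) (unix.Stat_t, error) {
	var st unix.Stat_t
	handle, closer, err := procThreadSelf(procRoot, "status")
	if err != nil {
		return st, err
	}
	defer closer()       // runs second: release the pinned OS thread
	defer handle.Close() // runs first: drop the O_PATH handle
	err = unix.Fstat(int(handle.Fd()), &st)
	return st, err
}
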
+
+var hasStatxMountId = sync.OnceValue(func() bool {
+ var (
+ stx unix.Statx_t
+ // We don't care which mount ID we get. The kernel will give us the
+ // unique one if it is supported.
+ wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+ )
+ err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
+ return err == nil && stx.Mask&wantStxMask != 0
+})
+
+func getMountId(dir *os.File, path string) (uint64, error) {
+ // If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
+ if !hasStatxMountId() {
+ return 0, nil
+ }
+
+ var (
+ stx unix.Statx_t
+ // We don't care which mount ID we get. The kernel will give us the
+ // unique one if it is supported.
+ wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+ )
+
+ err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
+ if stx.Mask&wantStxMask == 0 {
+ // It's not a kernel limitation, for some reason we couldn't get a
+ // mount ID. Assume it's some kind of attack.
+ err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs)
+ }
+ if err != nil {
+ return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err}
+ }
+ return stx.Mnt_id, nil
+}
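
For orientation, the same statx query in a minimal standalone form (this assumes a golang.org/x/sys release recent enough to define STATX_MNT_ID_UNIQUE, as this diff does):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// Request the unique mount ID if the kernel can provide it; older
	// kernels that only know STATX_MNT_ID will set that bit instead.
	mask := uint32(unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID)
	if err := unix.Statx(unix.AT_FDCWD, "/proc", 0, int(mask), &stx); err != nil {
		panic(err)
	}
	fmt.Printf("mount id: %d (supported: %v)\n", stx.Mnt_id, stx.Mask&mask != 0)
}
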
+
+func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error {
+ // Get the mntId of our procfs handle.
+ expectedMountId, err := getMountId(procRoot, "")
+ if err != nil {
+ return err
+ }
+ // Get the mntId of the target magic-link.
+ gotMountId, err := getMountId(dir, path)
+ if err != nil {
+ return err
+ }
+ // As long as the directory mount is alive, even with wrapping mount IDs,
+ // we would expect to see a different mount ID here. (Of course, if we're
+ // using unsafeHostProcRoot() then an attacker could change this after we
+ // did this check.)
+ if expectedMountId != gotMountId {
+ return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId)
+ }
+ return nil
+}
+
+func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) {
+ fdPath := fmt.Sprintf("fd/%d", fd)
+ procFdLink, closer, err := procThreadSelf(procRoot, fdPath)
+ if err != nil {
+ return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err)
+ }
+ defer procFdLink.Close()
+ defer closer()
+
+ // Try to detect if there is a mount on top of the magic-link we are
+ // about to dereference. If the caller uses the handle directly, this
+ // should be safe in general (a mount placed on top of the path
+ // afterwards would not affect the handle itself) and will definitely
+ // be safe if we are using privateProcRoot() (at least since Linux
+ // 5.12[1], when anonymous mount namespaces were completely isolated
+ // from external mounts including mount propagation events).
+ //
+ // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+ // onto targets that reside on shared mounts").
+ if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil {
+ return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err)
+ }
+
+ // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
+ // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
+ // relative pathnames").
+ return readlinkatFile(procFdLink, "")
+}
+
+func rawProcSelfFdReadlink(fd int) (string, error) {
+ procRoot, err := getProcRoot()
+ if err != nil {
+ return "", err
+ }
+ return doRawProcSelfFdReadlink(procRoot, fd)
+}
+
+func procSelfFdReadlink(f *os.File) (string, error) {
+ return rawProcSelfFdReadlink(int(f.Fd()))
+}
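
The underlying trick is easy to demonstrate with the plain standard library; the hardened path above exists precisely because this naive form trusts whatever happens to be mounted at /proc.

package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Naive equivalent of rawProcSelfFdReadlink: re-derive the handle's
	// current path from its fd number via the (unverified) host /proc.
	path, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", f.Fd()))
	fmt.Println(path, err) // "/etc/hostname <nil>" on an ordinary system
}
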
+
+var (
+ errPossibleBreakout = errors.New("possible breakout detected")
+ errInvalidDirectory = errors.New("wandered into deleted directory")
+ errDeletedInode = errors.New("cannot verify path of deleted inode")
+)
+
+func isDeadInode(file *os.File) error {
+ // If the nlink of a file drops to 0, there is an attacker deleting
+ // directories during our walk, which could result in weird /proc values.
+ // It's better to error out in this case.
+ stat, err := fstat(file)
+ if err != nil {
+ return fmt.Errorf("check for dead inode: %w", err)
+ }
+ if stat.Nlink == 0 {
+ err := errDeletedInode
+ if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
+ err = errInvalidDirectory
+ }
+ return fmt.Errorf("%w %q", err, file.Name())
+ }
+ return nil
+}
+
+func checkProcSelfFdPath(path string, file *os.File) error {
+ if err := isDeadInode(file); err != nil {
+ return err
+ }
+ actualPath, err := procSelfFdReadlink(file)
+ if err != nil {
+ return fmt.Errorf("get path of handle: %w", err)
+ }
+ if actualPath != path {
+ return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path)
+ }
+ return nil
+}
+
+// Test hooks used in the procfs tests to verify that the fallback logic works.
+// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
+var (
+ hookForcePrivateProcRootOpenTree = hookDummyFile
+ hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile
+ hookForceGetProcRootUnsafe = hookDummy
+
+ hookForceProcSelfTask = hookDummy
+ hookForceProcSelf = hookDummy
+)
+
+func hookDummy() bool { return false }
+func hookDummyFile(_ *os.File) bool { return false }
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
deleted file mode 100644
index 66bb574b9..000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
+++ /dev/null
@@ -1 +0,0 @@
-github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
index a82a5eae1..36373f8c5 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -10,19 +10,19 @@ import "os"
// are several projects (umoci and go-mtree) that are using this sort of
// interface.
-// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
-// equivalent to using the standard os.* family of functions. This is mainly
+// VFS is the minimal interface necessary to use [SecureJoinVFS]. A nil VFS is
+// equivalent to using the standard [os].* family of functions. This is mainly
// used for the purposes of mock testing, but also can be used to otherwise use
-// SecureJoin with VFS-like system.
+// [SecureJoinVFS] with VFS-like system.
type VFS interface {
- // Lstat returns a FileInfo describing the named file. If the file is a
- // symbolic link, the returned FileInfo describes the symbolic link. Lstat
- // makes no attempt to follow the link. These semantics are identical to
- // os.Lstat.
+ // Lstat returns an [os.FileInfo] describing the named file. If the
+ // file is a symbolic link, the returned [os.FileInfo] describes the
+ // symbolic link. Lstat makes no attempt to follow the link.
+ // The semantics are identical to [os.Lstat].
Lstat(name string) (os.FileInfo, error)
- // Readlink returns the destination of the named symbolic link. These
- // semantics are identical to os.Readlink.
+ // Readlink returns the destination of the named symbolic link.
+ // The semantics are identical to [os.Readlink].
Readlink(name string) (string, error)
}
@@ -30,12 +30,6 @@ type VFS interface {
// module.
type osVFS struct{}
-// Lstat returns a FileInfo describing the named file. If the file is a
-// symbolic link, the returned FileInfo describes the symbolic link. Lstat
-// makes no attempt to follow the link. These semantics are identical to
-// os.Lstat.
func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
-// Readlink returns the destination of the named symbolic link. These
-// semantics are identical to os.Readlink.
func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
diff --git a/vendor/github.com/distribution/reference/.gitattributes b/vendor/github.com/distribution/reference/.gitattributes
new file mode 100644
index 000000000..d207b1802
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
diff --git a/vendor/github.com/distribution/reference/.gitignore b/vendor/github.com/distribution/reference/.gitignore
new file mode 100644
index 000000000..dc07e6b04
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.gitignore
@@ -0,0 +1,2 @@
+# Cover profiles
+*.out
diff --git a/vendor/github.com/distribution/reference/.golangci.yml b/vendor/github.com/distribution/reference/.golangci.yml
new file mode 100644
index 000000000..793f0bb7e
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.golangci.yml
@@ -0,0 +1,18 @@
+linters:
+ enable:
+ - bodyclose
+ - dupword # Checks for duplicate words in the source code
+ - gofmt
+ - goimports
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ - vet
+ disable:
+ - errcheck
+
+run:
+ deadline: 2m
diff --git a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..48f6704c6
--- /dev/null
+++ b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
+
+Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.
diff --git a/vendor/github.com/distribution/reference/CONTRIBUTING.md b/vendor/github.com/distribution/reference/CONTRIBUTING.md
new file mode 100644
index 000000000..ab2194665
--- /dev/null
+++ b/vendor/github.com/distribution/reference/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# Contributing to the reference library
+
+## Community help
+
+If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
+[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)
+
+## Reporting security issues
+
+The maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
+
+Please **DO NOT** file a public issue, instead send your report privately to
+[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).
+
+## Reporting an issue properly
+
+By following these simple rules you will get better and faster feedback on your issue.
+
+ - search the bugtracker for an already reported issue
+
+### If you found an issue that describes your problem:
+
+ - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
+ - please refrain from adding "same thing here" or "+1" comments
+ - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
+ - comment if you have some new, technical and relevant information to add to the case
+ - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
+
+### If you have not found an existing issue that describes your problem:
+
+ 1. create a new issue, with a succinct title that describes your issue:
+ - bad title: "It doesn't work with my docker"
+ - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
+ 2. copy the output of (or similar for other container tools):
+ - `docker version`
+ - `docker info`
 - `docker exec <registry-container> registry --version`
+ 3. copy the command line you used to launch your Registry
+ 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
+ 5. reproduce your problem and get your docker daemon logs showing the error
+ 6. if relevant, copy your registry logs that show the error
+ 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
+ 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
+
+## Contributing Code
+
+Contributions should be made via pull requests. Pull requests will be reviewed
+by one or more maintainers or reviewers and merged when acceptable.
+
+You should follow the basic GitHub workflow:
+
+ 1. Use your own [fork](https://help.github.com/en/articles/about-forks)
+ 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
+ 3. Test your code
+ 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
+ 5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
+
+Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
+for tips on creating a successful contribution.
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/distribution/reference/GOVERNANCE.md b/vendor/github.com/distribution/reference/GOVERNANCE.md
new file mode 100644
index 000000000..200045b05
--- /dev/null
+++ b/vendor/github.com/distribution/reference/GOVERNANCE.md
@@ -0,0 +1,144 @@
+# distribution/reference Project Governance
+
+Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.
+
+For specific guidance on practical contribution steps please
+see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.
+
+## Maintainership
+
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+1) They share responsibility in the project's success.
+2) They have made a long-term, recurring time investment to improve the project.
+3) They spend that time doing whatever needs to be done, not necessarily what
+is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is harder to appreciate.
+It's easy to appreciate a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+
+## Reviewers
+
+A reviewer is a core role within the project.
+They share in reviewing issues and pull requests and their LGTM counts towards the
+required LGTM count to merge a code change into the project.
+
+Reviewers are part of the organization but do not have write access.
+Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
+
+## Adding maintainers
+
+Maintainers are first and foremost contributors that have shown they are
+committed to the long term success of a project. Contributors wanting to become
+maintainers are expected to be deeply involved in contributing code, pull
+request review, and triage of issues in the project for more than three months.
+
+Just contributing does not make you a maintainer, it is about building trust
+with the current maintainers of the project and being a person that they can
+depend on and trust to make decisions in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors that have
+shown regular activity on the project over the prior months. From this list,
+maintainer candidates are selected and proposed in a pull request or a
+maintainers communication channel.
+
+After a candidate has been announced to the maintainers, the existing
+maintainers are given five business days to discuss the candidate, raise
+objections and cast their vote. Votes may take place on the communication
+channel or via pull request comment. Candidates must be approved by at least 66%
+of the current maintainers by adding their vote on the mailing list. The
+reviewer role has the same process but only requires 33% of current maintainers.
+Only maintainers of the repository that the candidate is proposed for are
+allowed to vote.
+
+If a candidate is approved, a maintainer will contact the candidate to invite
+the candidate to open a pull request that adds the contributor to the
+MAINTAINERS file. The voting process may take place inside a pull request if a
+maintainer has already discussed the candidacy with the candidate and a
+maintainer is willing to be a sponsor by opening the pull request. The candidate
+becomes a maintainer once the pull request is merged.
+
+## Stepping down policy
+
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+
+## Removal of inactive maintainers
+
+Similar to the procedure for adding new maintainers, existing maintainers can
+be removed from the list if they do not show significant activity on the
+project. Periodically, the maintainers review the list of maintainers and their
+activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a neutral
+person will contact the maintainer to ask if they want to continue being
+a maintainer. If the maintainer decides to step down as a maintainer, they
+open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to remain a maintainer, but is unable to perform the
+required duties they can be removed with a vote of at least 66% of the current
+maintainers. In this case, maintainers should first propose the change to
+maintainers via the maintainers communication channel, then open a pull request
+for voting. The voting period is five business days. The voting pull request
+should not come as a surprise to any maintainer, and discussion of the
+maintainer's performance must not take place on the pull request itself.
+
+## How are decisions made?
+
+Docker distribution is an open-source project with an open design philosophy.
+This means that the repository is the source of truth for EVERY aspect of the
+project, including its philosophy, design, road map, and APIs. *If it's part of
+the project, it's in the repo. If it's in the repo, it's part of the project.*
+
+As a result, all decisions can be expressed as changes to the repository. An
+implementation change is a change to the source code. An API change is a change
+to the API specification. A philosophy change is a change to the philosophy
+manifesto, and so on.
+
+All decisions affecting distribution, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Merge or refuse the pull request. Who does this depends on the nature
+of the pull request and which areas of the project it affects.
+
+## Helping contributors with the DCO
+
+The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some contributors are not as familiar with `git`, or have used a web
+based editor, and thus asking them to `git commit --amend -s` is not the best
+way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO.
+The most trivial way for a contributor to allow the maintainer to do this, is to
+add a DCO signature in a pull request's comment, or a maintainer can simply
+note that the change is sufficiently trivial that it does not substantially
+change the existing contribution - i.e., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+
+## I'm a maintainer. Should I make pull requests too?
+
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+
+## Conflict Resolution
+
+If you have a technical dispute that you feel has reached an impasse with a
+subset of the community, any contributor may open an issue, specifically
+calling for a resolution vote of the current core maintainers to resolve the
+dispute. The same voting quorums required (2/3) for adding and removing
+maintainers will apply to conflict resolution.
diff --git a/vendor/github.com/distribution/reference/LICENSE b/vendor/github.com/distribution/reference/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/distribution/reference/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/distribution/reference/MAINTAINERS b/vendor/github.com/distribution/reference/MAINTAINERS
new file mode 100644
index 000000000..9e0a60c8b
--- /dev/null
+++ b/vendor/github.com/distribution/reference/MAINTAINERS
@@ -0,0 +1,26 @@
+# Distribution project maintainers & reviewers
+#
+# See GOVERNANCE.md for maintainer versus reviewer roles
+#
+# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
+# GitHub ID, Name, Email address
+"chrispat","Chris Patterson","chrispat@github.com"
+"clarkbw","Bryan Clark","clarkbw@github.com"
+"corhere","Cory Snider","csnider@mirantis.com"
+"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
+"heww","He Weiwei","hweiwei@vmware.com"
+"joaodrp","João Pereira","jpereira@gitlab.com"
+"justincormack","Justin Cormack","justin.cormack@docker.com"
+"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
+"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
+"sargun","Sargun Dhillon","sargun@sargun.me"
+"wy65701436","Wang Yan","wangyan@vmware.com"
+"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
+#
+# REVIEWERS
+# GitHub ID, Name, Email address
+"dmcgowan","Derek McGowan","derek@mcgstyle.net"
+"stevvooe","Stephen Day","stevvooe@gmail.com"
+"thajeztah","Sebastiaan van Stijn","github@gone.nl"
+"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
+"Jamstah", "James Hewitt", "james.hewitt@gmail.com"
diff --git a/vendor/github.com/distribution/reference/Makefile b/vendor/github.com/distribution/reference/Makefile
new file mode 100644
index 000000000..c78576b75
--- /dev/null
+++ b/vendor/github.com/distribution/reference/Makefile
@@ -0,0 +1,25 @@
+# Project packages.
+PACKAGES=$(shell go list ./...)
+
+# Flags passed to `go test`
+BUILDFLAGS ?=
+TESTFLAGS ?=
+
+.PHONY: all build test coverage
+.DEFAULT: all
+
+all: build
+
+build: ## no binaries to build, so just check compilation succeeds
+ go build ${BUILDFLAGS} ./...
+
+test: ## run tests
+ go test ${TESTFLAGS} ./...
+
+coverage: ## generate coverprofiles from the unit tests
+ rm -f coverage.txt
+ go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...
+
+.PHONY: help
+help:
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md
new file mode 100644
index 000000000..172a02e0b
--- /dev/null
+++ b/vendor/github.com/distribution/reference/README.md
@@ -0,0 +1,30 @@
+# Distribution reference
+
+Go library to handle references to container images.
+
+
+
+[CI status](https://github.com/distribution/reference/actions?query=workflow%3ACI)
+[Go reference](https://pkg.go.dev/github.com/distribution/reference)
+[Apache-2.0 license](LICENSE)
+[Code coverage](https://codecov.io/gh/distribution/reference)
+[FOSSA status](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)
+
+This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
+
+## Contribution
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project.
+
+## Communication
+
+For async communication and long running discussions please use issues and pull requests on the github repo.
+This will be the best place to discuss design and implementation.
+
+For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
+that everyone is welcome to join and chat about development.
+
+## Licenses
+
+The distribution codebase is released under the [Apache 2.0 license](LICENSE).
diff --git a/vendor/github.com/distribution/reference/SECURITY.md b/vendor/github.com/distribution/reference/SECURITY.md
new file mode 100644
index 000000000..aaf983c0f
--- /dev/null
+++ b/vendor/github.com/distribution/reference/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
+
+Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io.
diff --git a/vendor/github.com/distribution/reference/distribution-logo.svg b/vendor/github.com/distribution/reference/distribution-logo.svg
new file mode 100644
index 000000000..cc9f4073b
--- /dev/null
+++ b/vendor/github.com/distribution/reference/distribution-logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/distribution/reference/go.mod b/vendor/github.com/distribution/reference/go.mod
new file mode 100644
index 000000000..25cf64aca
--- /dev/null
+++ b/vendor/github.com/distribution/reference/go.mod
@@ -0,0 +1,5 @@
+module github.com/distribution/reference
+
+go 1.20
+
+require github.com/opencontainers/go-digest v1.0.0
diff --git a/vendor/github.com/distribution/reference/go.sum b/vendor/github.com/distribution/reference/go.sum
new file mode 100644
index 000000000..30f45e97f
--- /dev/null
+++ b/vendor/github.com/distribution/reference/go.sum
@@ -0,0 +1,2 @@
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
diff --git a/vendor/github.com/distribution/reference/helpers.go b/vendor/github.com/distribution/reference/helpers.go
new file mode 100644
index 000000000..d10c7ef83
--- /dev/null
+++ b/vendor/github.com/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See [path.Match] for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
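
Since this change vendors a new package, a brief usage sketch of these exported helpers may help reviewers (expected outputs shown in comments):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("ubuntu:24.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())                  // docker.io/library/ubuntu:24.04
	fmt.Println(reference.FamiliarString(ref)) // ubuntu:24.04
	fmt.Println(reference.IsNameOnly(ref))     // false: the reference carries a tag

	matched, _ := reference.FamiliarMatch("ubuntu*", ref)
	fmt.Println(matched) // true: matched against the familiar string
}
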
diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go
new file mode 100644
index 000000000..f4128314c
--- /dev/null
+++ b/vendor/github.com/distribution/reference/normalize.go
@@ -0,0 +1,255 @@
+package reference
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // legacyDefaultDomain is the legacy domain for Docker Hub (which was
+ // originally named "the Docker Index"). This domain is still used for
+ // authentication and image search, which were part of the "v1" Docker
+ // registry specification.
+ //
+ // This domain will continue to be supported, but there are plans to consolidate
+ // legacy domains to new "canonical" domains. Once those domains are decided
+ // on, we must update the normalization functions, but preserve compatibility
+ // with existing installs, clients, and user configuration.
+ legacyDefaultDomain = "index.docker.io"
+
+ // defaultDomain is the default domain used for images on Docker Hub.
+ // It is used to normalize "familiar" names to canonical names, for example,
+ // to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+ //
+ // Note that actual domain of Docker Hub's registry is registry-1.docker.io.
+ // This domain will continue to be supported, but there are plans to consolidate
+ // legacy domains to new "canonical" domains. Once those domains are decided
+ // on, we must update the normalization functions, but preserve compatibility
+ // with existing installs, clients, and user configuration.
+ defaultDomain = "docker.io"
+
+ // officialRepoPrefix is the namespace used for official images on Docker Hub.
+ // It is used to normalize "familiar" names to canonical names, for example,
+ // to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+ officialRepoPrefix = "library/"
+
+ // defaultTag is the default tag if no tag is provided.
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remote string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remote = remainder[:tagSep]
+ } else {
+ remote = remainder
+ }
+ if strings.ToLower(remote) != remote {
+ return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// namedTaggedDigested is a reference that has both a tag and a digest.
+type namedTaggedDigested interface {
+ NamedTagged
+ Digested
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention,
+// which allows for references to contain both a tag and a digest. It returns a
+// reference that is either tagged or digested. For references containing both
+// a tag and a digest, it returns a digested reference. For example, the following
+// reference:
+//
+// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// Is returned as a digested reference (with the ":latest" tag removed):
+//
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// References that are already "tagged" or "digested" are returned unmodified:
+//
+// // Already a digested reference
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// // Already a named reference
+// docker.io/library/busybox:latest
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if canonical, ok := named.(namedTaggedDigested); ok {
+ // The reference is both tagged and digested; only return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ return WithDigest(newNamed, canonical.Digest())
+ }
+ return TagNameOnly(named), nil
+}
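
A short sketch of the tag-plus-digest behaviour described in the doc comment above (digest value reused from that comment):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Tag and digest both present: the digest wins and ":latest" is dropped.
	ref, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:7cc4... (tag removed)

	// Name-only input: TagNameOnly (applied internally) adds ":latest".
	tagged, _ := reference.ParseDockerRef("alpine")
	fmt.Println(tagged.String()) // docker.io/library/alpine:latest
}
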
+
+// splitDockerDomain splits a repository name to domain and remote-name.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remoteName string) {
+ maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/")
+ if !ok {
+ // Fast-path for single element ("familiar" names), such as "ubuntu"
+ // or "ubuntu:latest". Familiar names must be handled separately, to
+ // prevent them from being handled as "hostname:port".
+ //
+ // Canonicalize them as "docker.io/library/name[:tag]"
+
+ // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain.
+ return defaultDomain, officialRepoPrefix + name
+ }
+
+ switch {
+ case maybeDomain == localhost:
+ // localhost is a reserved namespace and always considered a domain.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case maybeDomain == legacyDefaultDomain:
+ // canonicalize the Docker Hub and legacy "Docker Index" domains.
+ domain, remoteName = defaultDomain, maybeRemoteName
+ case strings.ContainsAny(maybeDomain, ".:"):
+ // Likely a domain or IP-address:
+ //
+ // - contains a "." (e.g., "example.com" or "127.0.0.1")
+ // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000")
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case strings.ToLower(maybeDomain) != maybeDomain:
+ // Uppercase namespaces are not allowed, so if the first element
+ // is not lowercase, we assume it to be a domain-name.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ default:
+ // None of the above: it's not a domain, so use the default, and
+ // use the name input the remote-name.
+ domain, remoteName = defaultDomain, name
+ }
+
+ if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') {
+ // Canonicalize "familiar" names, but only on Docker Hub, not
+ // on other domains:
+ //
+ // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]"
+ remoteName = officialRepoPrefix + remoteName
+ }
+
+ return domain, remoteName
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/"
+ if strings.HasPrefix(repo.path, officialRepoPrefix) {
+ // TODO(thaJeztah): this check may be too strict, as it assumes the
+ // "library/" namespace does not have nested namespaces. While this
+ // is true (currently), technically it would be possible for Docker
+ // Hub to use those (e.g. "library/distros/ubuntu:latest").
+ // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
+ if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
+ repo.path = remainder
+ }
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // The default tag is valid by construction; to create a
+ // NamedTagged from non-validated input, use the WithTag
+ // function directly so its error can be handled.
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
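
And a sketch of the accepted input shapes for ParseAnyReference (a bare 64-hex identifier, a full digest, or a familiar name):

package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	inputs := []string{
		strings.Repeat("a", 64), // identifier    -> sha256:aaaa...
		"redis",                 // familiar name -> docker.io/library/redis
	}
	for _, s := range inputs {
		ref, err := reference.ParseAnyReference(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(ref.String())
	}
}
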
diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go
new file mode 100644
index 000000000..900398bde
--- /dev/null
+++ b/vendor/github.com/distribution/reference/reference.go
@@ -0,0 +1,432 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] remote-name
+// domain := host [':' port-number]
+// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
+// domain-name := domain-component ['.' domain-component]*
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// path (or "remote-name") := path-component ['/' path-component]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name.
+ RepositoryNameTotalLengthMax = 255
+
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ //
+ // Deprecated: use [RepositoryNameTotalLengthMax] instead.
+ NameTotalLengthMax = RepositoryNameTotalLengthMax
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+ // MarshalText serializes the field to text bytes
+ // containing the string form of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
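
Field exists so references survive JSON (or any text-based) round trips with their parsed type re-resolved on decode. A sketch, assuming encoding/json:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, _ := reference.Parse("example.com/app:v1") // errors elided for brevity

	raw, _ := json.Marshal(reference.AsField(ref))
	fmt.Println(string(raw)) // "example.com/app:v1"

	var f reference.Field
	_ = json.Unmarshal(raw, &f)
	fmt.Println(f.Reference()) // example.com/app:v1, re-parsed on decode
}
```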
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+ // Digested is an object which has a digest
+ // by which it can be referenced.
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+ // Canonical is a reference with a fully unique name,
+ // including a domain and a digest.
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the [Named] reference.
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the [Named] reference.
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+// splitDomain splits a named reference into a hostname and path string.
+// If no valid hostname is found, the hostname is empty and the full value
+ // is returned as the name.
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
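
Domain, Path, and splitDomain are purely syntactic: no registry lookup or Docker-style normalization happens here (a single-component prefix such as "myregistry/app" would be split as domain "myregistry"; the localhost/dot heuristics live in normalize.go). A small sketch:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, _ := reference.ParseNamed("ghcr.io/containers/skopeo") // errors elided
	fmt.Println(reference.Domain(named)) // ghcr.io
	fmt.Println(reference.Path(named))   // containers/skopeo

	withTag, _ := reference.ParseNamed("localhost:5000/dev/app:v2")
	fmt.Println(reference.Domain(withTag)) // localhost:5000
}
```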
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ if len(repo.path) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
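
Parse returns the most specific concrete type via getBestReferenceType below, so callers typically discover capabilities through type assertions. A sketch with a placeholder digest value:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	dgst := "sha256:" + strings.Repeat("a", 64) // placeholder digest value
	ref, err := reference.Parse("example.com/app:v1@" + dgst)
	if err != nil {
		panic(err)
	}
	if t, ok := ref.(reference.NamedTagged); ok {
		fmt.Println("name:", t.Name(), "tag:", t.Tag())
	}
	if d, ok := ref.(reference.Digested); ok {
		fmt.Println("digest:", d.Digest())
	}
}
```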
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
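
The canonical-form check means ParseNamed rejects shorthand that ParseNormalizedNamed would silently expand; a quick illustration:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// "ubuntu" normalizes to "docker.io/library/ubuntu", which differs from
	// the input string, so ParseNamed refuses it.
	if _, err := reference.ParseNamed("ubuntu"); err != nil {
		fmt.Println(err) // repository name must be canonical
	}

	named, _ := reference.ParseNamed("docker.io/library/ubuntu")
	fmt.Println(named.Name()) // docker.io/library/ubuntu
}
```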
+
+// WithName returns a named object representing the given string. If the input
+ // is invalid, ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(match[2]) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
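
WithName and WithTag compose naturally when building references programmatically rather than parsing user input; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.WithName("quay.io/podman/stable")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(named, "v5.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged) // quay.io/podman/stable:v5.0
}
```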
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ repo := repository{}
+ if r, ok := ref.(namedRepository); ok {
+ repo.domain, repo.path = r.Domain(), r.Path()
+ } else {
+ repo.domain, repo.path = splitDomain(ref.Name())
+ }
+ return repo
+}
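
WithDigest and TrimNamed are inverse-flavored operations: one pins a name to content, the other strips any tag or digest back off. A sketch using go-digest (the printed digest is the sha256 of "hello"):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	named, _ := reference.WithName("example.com/app") // errors elided for brevity

	dgst := digest.FromString("hello")
	canonical, _ := reference.WithDigest(named, dgst)
	fmt.Println(canonical) // example.com/app@sha256:2cf24d...

	fmt.Println(reference.TrimNamed(canonical)) // example.com/app
}
```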
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/vendor/github.com/distribution/reference/regexp.go b/vendor/github.com/distribution/reference/regexp.go
new file mode 100644
index 000000000..65bc49d79
--- /dev/null
+++ b/vendor/github.com/distribution/reference/regexp.go
@@ -0,0 +1,163 @@
+package reference
+
+import (
+ "regexp"
+ "strings"
+)
+
+// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:").
+var DigestRegexp = regexp.MustCompile(digestPat)
+
+ // DomainRegexp matches a hostname or IP address, optionally including a port
+// number. It defines the structure of potential domain components that may be
+// part of image names. This is purposely a subset of what is allowed by DNS to
+// ensure backwards compatibility with Docker image names. It may be a subset of
+// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
+// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
+// addresses such as IPv4-Mapped).
+//
+ // [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874
+var DomainRegexp = regexp.MustCompile(domainAndPort)
+
+ // IdentifierRegexp is the format for a string identifier used as a
+ // content-addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+var IdentifierRegexp = regexp.MustCompile(identifier)
+
+// NameRegexp is the format for the name component of references, including
+// an optional domain and port, but without tag or digest suffix.
+var NameRegexp = regexp.MustCompile(namePat)
+
+// ReferenceRegexp is the full supported format of a reference. The regexp
+// is anchored and has capturing groups for name, tag, and digest
+// components.
+var ReferenceRegexp = regexp.MustCompile(referencePat)
+
+// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
+//
+// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
+var TagRegexp = regexp.MustCompile(tag)
+
+const (
+ // alphanumeric defines the alphanumeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphanumeric = `[a-z0-9]+`
+
+ // separator defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores, or multiple
+ // dashes. Repeated dashes and underscores are intentionally treated
+ // differently: repeated dashes were permitted in order to support valid
+ // hostnames as name components, and double underscores are allowed as a
+ // separator to loosen the restriction for previously supported names.
+ separator = `(?:[._]|__|[-]+)`
+
+ // localhost is treated as a special value for domain-name. Any other
+ // domain-name without a "." or a ":port" is considered a path component.
+ localhost = `localhost`
+
+ // domainNameComponent restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp.
+ domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
+
+ // optionalPort matches an optional port-number including the port separator
+ // (e.g. ":80").
+ optionalPort = `(?::[0-9]+)?`
+
+ // tag matches valid tag names. From docker/docker:graph/tags.go.
+ tag = `[\w][\w.-]{0,127}`
+
+ // digestPat matches well-formed digests, including algorithm (e.g. "sha256:").
+ //
+ // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
+ // so that go-digest defines the canonical format. Note that the go-digest is
+ // more relaxed:
+ // - it allows multiple algorithms (e.g. "sha256+b64:") to allow
+ // future expansion of supported algorithms.
+ // - it allows the "<encoded>" value to use urlsafe base64 encoding as
+ // defined in [rfc4648, section 5].
+ //
+ // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5
+ digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
+
+ // identifier is the format for a content addressable identifier using sha256.
+ // These identifiers are like digests without the algorithm, since sha256 is used.
+ identifier = `([a-f0-9]{64})`
+
+ // ipv6address matches an IPv6 address enclosed in square brackets. An
+ // IPv6 address may be represented in many ways (see rfc5952); only the
+ // compressed and uncompressed formats are allowed. IPv6 zone identifiers
+ // (rfc6874) and special addresses such as IPv4-Mapped are deliberately
+ // excluded.
+ ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
+)
+
+var (
+ // domainName defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names. This includes IPv4 addresses in decimal format.
+ domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)
+
+ // host defines the structure of potential domains based on the URI
+ // Host subcomponent on rfc3986. It may be a subset of DNS domain name,
+ // or an IPv4 address in decimal format, or an IPv6 address between square
+ // brackets (excluding zone identifiers as defined by rfc6874 or special
+ // addresses such as IPv4-Mapped).
+ host = `(?:` + domainName + `|` + ipv6address + `)`
+
+ // domainAndPort defines a host, optionally followed by a port number, as
+ // allowed by the URI Host subcomponent on rfc3986, to ensure backwards
+ // compatibility with Docker image names.
+ domainAndPort = host + optionalPort
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = regexp.MustCompile(anchored(tag))
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))
+
+ // pathComponent restricts path-components to start with an alphanumeric
+ // character, with following parts able to be separated by a separator
+ // (one period, one or two underscores, or multiple dashes).
+ pathComponent = alphanumeric + anyTimes(separator+alphanumeric)
+
+ // remoteName matches the remote-name of a repository. It consists of one
+ // or more forward slash (/) delimited path-components:
+ //
+ // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
+ remoteName = pathComponent + anyTimes(`/`+pathComponent)
+ namePat = optional(domainAndPort+`/`) + remoteName
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))
+
+ referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
+)
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...string) string {
+ return `(?:` + strings.Join(res, "") + `)?`
+}
+
+// anyTimes wraps the expression in a non-capturing group that can occur
+// any number of times.
+func anyTimes(res ...string) string {
+ return `(?:` + strings.Join(res, "") + `)*`
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...string) string {
+ return `(` + strings.Join(res, "") + `)`
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...string) string {
+ return `^` + strings.Join(res, "") + `$`
+}
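
To sanity-check how these helpers compose, one can probe ReferenceRegexp's three capture groups directly (a sketch; the port and tag values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	m := reference.ReferenceRegexp.FindStringSubmatch("localhost:5000/app:dev")
	// m[1] = name, m[2] = tag, m[3] = digest (empty when absent).
	fmt.Printf("%q\n", m[1:]) // ["localhost:5000/app" "dev" ""]
}
```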
diff --git a/vendor/github.com/distribution/reference/sort.go b/vendor/github.com/distribution/reference/sort.go
new file mode 100644
index 000000000..416c37b07
--- /dev/null
+++ b/vendor/github.com/distribution/reference/sort.go
@@ -0,0 +1,75 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package reference
+
+import (
+ "sort"
+)
+
+// Sort sorts string references preferring higher information references.
+//
+// The precedence is as follows:
+//
+ // 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
+ // 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
+ // 3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>")
+ // 4. [Named] (e.g., "docker.io/library/busybox")
+ // 5. [Digested] (e.g., "docker.io@sha256:<digest>")
+ // 6. Parse error
+func Sort(references []string) []string {
+ var prefs []Reference
+ var bad []string
+
+ for _, ref := range references {
+ pref, err := ParseAnyReference(ref)
+ if err != nil {
+ bad = append(bad, ref)
+ } else {
+ prefs = append(prefs, pref)
+ }
+ }
+ sort.Slice(prefs, func(a, b int) bool {
+ ar := refRank(prefs[a])
+ br := refRank(prefs[b])
+ if ar == br {
+ return prefs[a].String() < prefs[b].String()
+ }
+ return ar < br
+ })
+ sort.Strings(bad)
+ var refs []string
+ for _, pref := range prefs {
+ refs = append(refs, pref.String())
+ }
+ return append(refs, bad...)
+}
+
+func refRank(ref Reference) uint8 {
+ if _, ok := ref.(Named); ok {
+ if _, ok = ref.(Tagged); ok {
+ if _, ok = ref.(Digested); ok {
+ return 1
+ }
+ return 2
+ }
+ if _, ok = ref.(Digested); ok {
+ return 3
+ }
+ return 4
+ }
+ return 5
+}
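
A small sketch of Sort's behavior: higher-information references sort first and unparseable strings sink to the end, as ranked by refRank above:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	sorted := reference.Sort([]string{
		"busybox",
		"busybox:1.36",
		"not//a//valid//ref",
	})
	for _, s := range sorted {
		fmt.Println(s)
	}
	// docker.io/library/busybox:1.36
	// docker.io/library/busybox
	// not//a//valid//ref
}
```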
diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore
deleted file mode 100644
index 4cf7888e9..000000000
--- a/vendor/github.com/docker/distribution/.gitignore
+++ /dev/null
@@ -1,38 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-# never checkin from the bin file (for now)
-bin/*
-
-# Test key files
-*.pem
-
-# Cover profiles
-*.out
-
-# Editor/IDE specific files.
-*.sublime-project
-*.sublime-workspace
-.idea/*
diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json
deleted file mode 100644
index 9df5b14bc..000000000
--- a/vendor/github.com/docker/distribution/.gometalinter.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "Vendor": true,
- "Deadline": "2m",
- "Sort": ["linter", "severity", "path", "line"],
- "EnableGC": true,
- "Enable": [
- "structcheck",
- "staticcheck",
- "unconvert",
-
- "gofmt",
- "goimports",
- "golint",
- "vet"
- ]
-}
diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap
deleted file mode 100644
index 0f48321d4..000000000
--- a/vendor/github.com/docker/distribution/.mailmap
+++ /dev/null
@@ -1,32 +0,0 @@
-Stephen J Day Stephen Day
-Stephen J Day Stephen Day
-Olivier Gambier Olivier Gambier
-Brian Bland Brian Bland
-Brian Bland Brian Bland
-Josh Hawn Josh Hawn
-Richard Scothern Richard
-Richard Scothern Richard Scothern
-Andrew Meredith Andrew Meredith
-harche harche
-Jessie Frazelle
-Sharif Nassar Sharif Nassar
-Sven Dowideit Sven Dowideit
-Vincent Giersch Vincent Giersch
-davidli davidli
-Omer Cohen Omer Cohen
-Eric Yang Eric Yang
-Nikita Tarasov Nikita
-Yu Wang yuwaMSFT2
-Yu Wang Yu Wang (UC)
-Olivier Gambier dmp
-Olivier Gambier Olivier
-Olivier Gambier Olivier
-Elsan Li 李楠 elsanli(李楠)
-Rui Cao ruicao
-Gwendolynne Barr gbarr01
-Haibing Zhou 周海兵 zhouhaibing089
-Feng Honglin tifayuki
-Helen Xie Helen-xie
-Mike Brown Mike Brown
-Manish Tomar Manish Tomar
-Sakeven Jiang sakeven
diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml
deleted file mode 100644
index 44ced6045..000000000
--- a/vendor/github.com/docker/distribution/.travis.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-dist: trusty
-sudo: required
-# setup travis so that we can run containers for integration tests
-services:
- - docker
-
-language: go
-
-go:
- - "1.11.x"
-
-go_import_path: github.com/docker/distribution
-
-addons:
- apt:
- packages:
- - python-minimal
-
-
-env:
- - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
-
-before_install:
- - uname -r
- - sudo apt-get -q update
-
-install:
- - go get -u github.com/vbatts/git-validation
- # TODO: Add enforcement of license
- # - go get -u github.com/kunalkushwaha/ltag
- - cd $TRAVIS_BUILD_DIR
-
-script:
- - export GOOS=$TRAVIS_GOOS
- - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- - DCO_VERBOSITY=-q script/validate/dco
- - GOOS=linux script/setup/install-dev-tools
- - script/validate/vendor
- - go build -i .
- - make check
- - make build
- - make binaries
- # Currently takes too long
- #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
- - if [ "$GOOS" = "linux" ]; then make coverage ; fi
-
-after_success:
- - bash <(curl -s https://codecov.io/bash) -F linux
-
-before_deploy:
- # Run tests with storage driver configurations
diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md
deleted file mode 100644
index 2981d016b..000000000
--- a/vendor/github.com/docker/distribution/BUILDING.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-# Building the registry source
-
-## Use-case
-
-This is useful if you intend to actively work on the registry.
-
-### Alternatives
-
-Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
-
-People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
-
-OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
-
-### Gotchas
-
-You are expected to know your way around with go & git.
-
-If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
-
-## Build the development environment
-
-The first prerequisite of properly building distribution targets is to have a Go
-development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
-for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
-environment.
-
-If a Go development environment is setup, one can use `go get` to install the
-`registry` command from the current latest:
-
- go get github.com/docker/distribution/cmd/registry
-
-The above will install the source repository into the `GOPATH`.
-
-Now create the directory for the registry data (this might require you to set permissions properly)
-
- mkdir -p /var/lib/registry
-
-... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
-
-The `registry`
-binary can then be run with the following:
-
- $ $GOPATH/bin/registry --version
- $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
-
-> __NOTE:__ While you do not need to use `go get` to checkout the distribution
-> project, for these build instructions to work, the project must be checked
-> out in the correct location in the `GOPATH`. This should almost always be
-> `$GOPATH/src/github.com/docker/distribution`.
-
-The registry can be run with the default config using the following
-incantation:
-
- $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
- INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] debug server listening localhost:5001
-
-If it is working, one should see the above log messages.
-
-### Repeatable Builds
-
-For the full development experience, one should `cd` into
-`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
-commands, such as `go test`, should work per package (please see
-[Developing](#developing) if they don't work).
-
-A `Makefile` has been provided as a convenience to support repeatable builds.
-Please install the following into `GOPATH` for it to work:
-
- go get github.com/golang/lint/golint
-
-Once these commands are available in the `GOPATH`, run `make` to get a full
-build:
-
- $ make
- + clean
- + fmt
- + vet
- + lint
- + build
- github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
- github.com/sirupsen/logrus
- github.com/docker/libtrust
- ...
- github.com/yvasiyarov/gorelic
- github.com/docker/distribution/registry/handlers
- github.com/docker/distribution/cmd/registry
- + test
- ...
- ok github.com/docker/distribution/digest 7.875s
- ok github.com/docker/distribution/manifest 0.028s
- ok github.com/docker/distribution/notifications 17.322s
- ? github.com/docker/distribution/registry [no test files]
- ok github.com/docker/distribution/registry/api/v2 0.101s
- ? github.com/docker/distribution/registry/auth [no test files]
- ok github.com/docker/distribution/registry/auth/silly 0.011s
- ...
- + /Users/sday/go/src/github.com/docker/distribution/bin/registry
- + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
- + binaries
-
-The above provides a repeatable build using the contents of the vendor
-directory. This includes formatting, vetting, linting, building,
-testing and generating tagged binaries. We can verify this worked by running
-the registry binary generated in the "./bin" directory:
-
- $ ./bin/registry --version
- ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
-
-### Optional build tags
-
-Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
-the environment variable `DOCKER_BUILDTAGS`.
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md
deleted file mode 100644
index 4c067d9e7..000000000
--- a/vendor/github.com/docker/distribution/CONTRIBUTING.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Contributing to the registry
-
-## Before reporting an issue...
-
-### If your problem is with...
-
- - automated builds
- - your account on the [Docker Hub](https://hub.docker.com/)
- - any other [Docker Hub](https://hub.docker.com/) issue
-
-Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
-
-### If you...
-
- - need help setting up your registry
- - can't figure out something
- - are not sure what's going on or what your problem is
-
-Then please do not open an issue here yet - you should first try one of the following support forums:
-
- - irc: #docker-distribution on freenode
- - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-
-### Reporting security issues
-
-The Docker maintainers take security seriously. If you discover a security
-issue, please bring it to their attention right away!
-
-Please **DO NOT** file a public issue, instead send your report privately to
-[security@docker.com](mailto:security@docker.com).
-
-## Reporting an issue properly
-
-By following these simple rules you will get better and faster feedback on your issue.
-
- - search the bugtracker for an already reported issue
-
-### If you found an issue that describes your problem:
-
- - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- - please refrain from adding "same thing here" or "+1" comments
- - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- - comment if you have some new, technical and relevant information to add to the case
- - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
-
-### If you have not found an existing issue that describes your problem:
-
- 1. create a new issue, with a succinct title that describes your issue:
- - bad title: "It doesn't work with my docker"
- - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
- 2. copy the output of:
- - `docker version`
- - `docker info`
- - `docker exec registry --version`
- 3. copy the command line you used to launch your Registry
- 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
- 5. reproduce your problem and get your docker daemon logs showing the error
- 6. if relevant, copy your registry logs that show the error
- 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
- 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
-
-## Contributing a patch for a known bug, or a small correction
-
-You should follow the basic GitHub workflow:
-
- 1. fork
- 2. commit a change
- 3. make sure the tests pass
- 4. PR
-
-Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
-
- - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- - sign your commits using `-s`: `git commit -s -m "My commit"`
-
-Some simple rules to ensure quick merge:
-
- - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- - if you need to amend your PR following comments, please squash instead of adding more commits
-
-## Contributing new features
-
-You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
-
-If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
-If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
-
-Then you should submit your implementation, clearly linking to the issue (and possible proposal).
-
-Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
-
-It's mandatory to:
-
- - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- - address maintainers' comments and modify your submission accordingly
- - write tests for any new code
-
-Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
-
-Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
-
-## Coding Style
-
-Unless explicitly stated, we follow all coding guidelines from the Go
-community. While some of these standards may seem arbitrary, they somehow seem
-to result in a solid, consistent codebase.
-
-It is possible that the code base does not currently comply with these
-guidelines. We are not looking for a massive PR that fixes this, since that
-goes against the spirit of the guidelines. All new contributions should make a
-best effort to clean up and make the code base better than they left it.
-Obviously, apply your best judgement. Remember, the goal here is to make the
-code base easier for humans to navigate and understand. Always keep that in
-mind when nudging others to comply.
-
-The rules:
-
-1. All code should be formatted with `gofmt -s`.
-2. All code should pass the default levels of
- [`golint`](https://github.com/golang/lint).
-3. All code should follow the guidelines covered in [Effective
- Go](http://golang.org/doc/effective_go.html) and [Go Code Review
- Comments](https://github.com/golang/go/wiki/CodeReviewComments).
-4. Comment the code. Tell us the why, the history and the context.
-5. Document _all_ declarations and methods, even private ones. Declare
- expectations, caveats and anything else that may be important. If a type
- gets exported, having the comments already there will ensure it's ready.
-6. Variable name length should be proportional to its context and no longer.
- `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
- In practice, short methods will have short variable names and globals will
- have longer names.
-7. No underscores in package names. If you need a compound name, step back,
- and re-examine why you need a compound name. If you still think you need a
- compound name, lose the underscore.
-8. No utils or helpers packages. If a function is not general enough to
- warrant its own package, it has not been written generally enough to be a
- part of a util package. Just leave it unexported and well-documented.
-9. All tests should run with `go test` and outside tooling should not be
- required. No, we don't need another unit testing framework. Assertion
- packages are acceptable if they provide _real_ incremental value.
-10. Even though we call these "rules" above, they are actually just
- guidelines. Since you've read all the rules, you now know that.
-
-If you are having trouble getting into the mood of idiomatic Go, we recommend
-reading through [Effective Go](http://golang.org/doc/effective_go.html). The
-[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
-kool-aid is a lot easier than going thirsty.
diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile
deleted file mode 100644
index 9537817ca..000000000
--- a/vendor/github.com/docker/distribution/Dockerfile
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM golang:1.11-alpine AS build
-
-ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
-ENV BUILDTAGS include_oss include_gcs
-
-ARG GOOS=linux
-ARG GOARCH=amd64
-ARG GOARM=6
-
-RUN set -ex \
- && apk add --no-cache make git file
-
-WORKDIR $DISTRIBUTION_DIR
-COPY . $DISTRIBUTION_DIR
-RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"
-
-FROM alpine
-COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
-COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry
-VOLUME ["/var/lib/registry"]
-EXPOSE 5000
-ENTRYPOINT ["registry"]
-CMD ["serve", "/etc/docker/registry/config.yml"]
diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS
deleted file mode 100644
index 3183620c5..000000000
--- a/vendor/github.com/docker/distribution/MAINTAINERS
+++ /dev/null
@@ -1,243 +0,0 @@
-# Distribution maintainers file
-#
-# This file describes who runs the docker/distribution project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-
-[Rules]
-
- [Rules.maintainers]
-
- title = "What is a maintainer?"
-
- text = """
-There are different types of maintainers, with different responsibilities, but
-all maintainers have 3 things in common:
-
-1) They share responsibility in the project's success.
-2) They have made a long-term, recurring time investment to improve the project.
-3) They spend that time doing whatever needs to be done, not necessarily what
-is the most interesting or fun.
-
-Maintainers are often under-appreciated, because their work is harder to appreciate.
-It's easy to appreciate a really cool and technically advanced feature. It's harder
-to appreciate the absence of bugs, the slow but steady improvement in stability,
-or the reliability of a release process. But those things distinguish a good
-project from a great one.
-"""
-
- [Rules.reviewer]
-
- title = "What is a reviewer?"
-
- text = """
-A reviewer is a core role within the project.
-They share in reviewing issues and pull requests and their LGTM count towards the
-required LGTM count to merge a code change into the project.
-
-Reviewers are part of the organization but do not have write access.
-Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
-"""
-
- [Rules.adding-maintainers]
-
- title = "How are maintainers added?"
-
- text = """
-Maintainers are first and foremost contributors that have shown they are
-committed to the long term success of a project. Contributors wanting to become
-maintainers are expected to be deeply involved in contributing code, pull
-request review, and triage of issues in the project for more than three months.
-
-Just contributing does not make you a maintainer, it is about building trust
-with the current maintainers of the project and being a person that they can
-depend on and trust to make decisions in the best interest of the project.
-
-Periodically, the existing maintainers curate a list of contributors that have
-shown regular activity on the project over the prior months. From this list,
-maintainer candidates are selected and proposed on the maintainers mailing list.
-
-After a candidate has been announced on the maintainers mailing list, the
-existing maintainers are given five business days to discuss the candidate,
-raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing
-list. Only maintainers of the repository that the candidate is proposed for are
-allowed to vote.
-
-If a candidate is approved, a maintainer will contact the candidate to invite
-the candidate to open a pull request that adds the contributor to the
-MAINTAINERS file. The candidate becomes a maintainer once the pull request is
-merged.
-"""
-
- [Rules.stepping-down-policy]
-
- title = "Stepping down policy"
-
- text = """
-Life priorities, interests, and passions can change. If you're a maintainer but
-feel you must remove yourself from the list, inform other maintainers that you
-intend to step down, and if possible, help find someone to pick up your work.
-At the very least, ensure your work can be continued where you left off.
-
-After you've informed other maintainers, create a pull request to remove
-yourself from the MAINTAINERS file.
-"""
-
- [Rules.inactive-maintainers]
-
- title = "Removal of inactive maintainers"
-
- text = """
-Similar to the procedure for adding new maintainers, existing maintainers can
-be removed from the list if they do not show significant activity on the
-project. Periodically, the maintainers review the list of maintainers and their
-activity over the last three months.
-
-If a maintainer has shown insufficient activity over this period, a neutral
-person will contact the maintainer to ask if they want to continue being
-a maintainer. If the maintainer decides to step down as a maintainer, they
-open a pull request to be removed from the MAINTAINERS file.
-
-If the maintainer wants to remain a maintainer, but is unable to perform the
-required duties they can be removed with a vote of at least 66% of
-the current maintainers. An e-mail is sent to the
-mailing list, inviting maintainers of the project to vote. The voting period is
-five business days. Issues related to a maintainer's performance should be
-discussed with them among the other maintainers so that they are not surprised
-by a pull request removing them.
-"""
-
- [Rules.decisions]
-
- title = "How are decisions made?"
-
- text = """
-Short answer: EVERYTHING IS A PULL REQUEST.
-
-distribution is an open-source project with an open design philosophy. This means
-that the repository is the source of truth for EVERY aspect of the project,
-including its philosophy, design, road map, and APIs. *If it's part of the
-project, it's in the repo. If it's in the repo, it's part of the project.*
-
-As a result, all decisions can be expressed as changes to the repository. An
-implementation change is a change to the source code. An API change is a change
-to the API specification. A philosophy change is a change to the philosophy
-manifesto, and so on.
-
-All decisions affecting distribution, big and small, follow the same 3 steps:
-
-* Step 1: Open a pull request. Anyone can do this.
-
-* Step 2: Discuss the pull request. Anyone can do this.
-
-* Step 3: Merge or refuse the pull request. Who does this depends on the nature
-of the pull request and which areas of the project it affects.
-"""
-
- [Rules.DCO]
-
- title = "Helping contributors with the DCO"
-
- text = """
-The [DCO or `Sign your work`](
-https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work)
-requirement is not intended as a roadblock or speed bump.
-
-Some distribution contributors are not as familiar with `git`, or have used a web
-based editor, and thus asking them to `git commit --amend -s` is not the best
-way forward.
-
-In this case, maintainers can update the commits based on clause (c) of the DCO.
-The most trivial way for a contributor to allow the maintainer to do this, is to
-add a DCO signature in a pull requests's comment, or a maintainer can simply
-note that the change is sufficiently trivial that it does not substantially
-change the existing contribution - i.e., a spelling change.
-
-When you add someone's DCO, please also add your own to keep a log.
-"""
-
- [Rules."no direct push"]
-
- title = "I'm a maintainer. Should I make pull requests too?"
-
- text = """
-Yes. Nobody should ever push to master directly. All changes should be
-made through a pull request.
-"""
-
- [Rules.tsc]
-
- title = "Conflict Resolution and technical disputes"
-
- text = """
-distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters."
- """
-
- [Rules.meta]
-
- title = "How is this process changed?"
-
- text = "Just like everything else: by making a pull request :)"
-
-# Current project organization
-[Org]
-
- [Org.Maintainers]
- people = [
- "dmcgowan",
- "dmp42",
- "stevvooe",
- ]
- [Org.Reviewers]
- people = [
- "manishtomar",
- "caervs",
- "davidswu",
- "RobbKistler"
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.caervs]
- Name = "Ryan Abrams"
- Email = "rdabrams@gmail.com"
- GitHub = "caervs"
-
- [people.davidswu]
- Name = "David Wu"
- Email = "dwu7401@gmail.com"
- GitHub = "davidswu"
-
- [people.dmcgowan]
- Name = "Derek McGowan"
- Email = "derek@mcgstyle.net"
- GitHub = "dmcgowan"
-
- [people.dmp42]
- Name = "Olivier Gambier"
- Email = "olivier@docker.com"
- GitHub = "dmp42"
-
- [people.manishtomar]
- Name = "Manish Tomar"
- Email = "manish.tomar@docker.com"
- GitHub = "manishtomar"
-
- [people.RobbKistler]
- Name = "Robb Kistler"
- Email = "robb.kistler@docker.com"
- GitHub = "RobbKistler"
-
- [people.stevvooe]
- Name = "Stephen Day"
- Email = "stephen.day@docker.com"
- GitHub = "stevvooe"
diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile
deleted file mode 100644
index 4635c6eca..000000000
--- a/vendor/github.com/docker/distribution/Makefile
+++ /dev/null
@@ -1,102 +0,0 @@
-# Root directory of the project (absolute path).
-ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-
-# Used to populate version variable in main package.
-VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
-REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
-
-
-PKG=github.com/docker/distribution
-
-# Project packages.
-PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/)
-INTEGRATION_PACKAGE=${PKG}
-COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES})
-
-
-# Project binaries.
-COMMANDS=registry digest registry-api-descriptor-template
-
-# Allow turning off function inlining and variable registerization
-ifeq (${DISABLE_OPTIMIZATION},true)
- GO_GCFLAGS=-gcflags "-N -l"
- VERSION:="$(VERSION)-noopt"
-endif
-
-WHALE = "+"
-
-# Go files
-#
-TESTFLAGS_RACE=
-GOFILES=$(shell find . -type f -name '*.go')
-GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
-GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
-
-BINARIES=$(addprefix bin/,$(COMMANDS))
-
-# Flags passed to `go test`
-TESTFLAGS ?= -v $(TESTFLAGS_RACE)
-TESTFLAGS_PARALLEL ?= 8
-
-.PHONY: all build binaries check clean test test-race test-full integration coverage
-.DEFAULT: all
-
-all: binaries
-
-# This only needs to be generated by hand when cutting full releases.
-version/version.go:
- @echo "$(WHALE) $@"
- ./version/version.sh > $@
-
-check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
- @echo "$(WHALE) $@"
- gometalinter --config .gometalinter.json ./...
-
-test: ## run tests, except integration test with test.short
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-test-race: ## run tests, except integration test with test.short and race
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-test-full: ## run tests, except integration tests
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-integration: ## run integration tests
- @echo "$(WHALE) $@"
- @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE}
-
-coverage: ## generate coverprofiles from the unit tests
- @echo "$(WHALE) $@"
- @rm -f coverage.txt
- @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null
- @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \
- go test ${GO_TAGS} ${TESTFLAGS} \
- -cover \
- -coverprofile=profile.out \
- -covermode=atomic $$pkg || exit; \
- if [ -f profile.out ]; then \
- cat profile.out >> coverage.txt; \
- rm profile.out; \
- fi; \
- done )
-
-FORCE:
-
-# Build a binary from a cmd.
-bin/%: cmd/% FORCE
- @echo "$(WHALE) $@${BINARY_SUFFIX}"
- @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$<
-
-binaries: $(BINARIES) ## build binaries
- @echo "$(WHALE) $@"
-
-build:
- @echo "$(WHALE) $@"
- @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES)
-
-clean: ## clean up binaries
- @echo "$(WHALE) $@"
- @rm -f $(BINARIES)
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
deleted file mode 100644
index 998878850..000000000
--- a/vendor/github.com/docker/distribution/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Distribution
-
-The Docker toolset to pack, ship, store, and deliver content.
-
-This repository's main product is the Docker Registry 2.0 implementation
-for storing and distributing Docker images. It supersedes the
-[docker/docker-registry](https://github.com/docker/docker-registry)
-project with a new API design, focused around security and performance.
-
-
-
-[](https://circleci.com/gh/docker/distribution/tree/master)
-[](https://godoc.org/github.com/docker/distribution)
-
-This repository contains the following components:
-
-|**Component** |Description |
-|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
-| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
-| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
-| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
-
-### How does this integrate with Docker engine?
-
-This project should provide an implementation to a V2 API for use in the [Docker
-core project](https://github.com/docker/docker). The API should be embeddable
-and simplify the process of securely pulling and pushing content from `docker`
-daemons.
-
-### What are the long term goals of the Distribution project?
-
-The _Distribution_ project has the further long term goal of providing a
-secure tool chain for distributing content. The specifications, APIs and tools
-should be as useful with Docker as they are without.
-
-Our goal is to design a professional grade and extensible content distribution
-system that allow users to:
-
-* Enjoy an efficient, secured and reliable way to store, manage, package and
- exchange content
-* Hack/roll their own on top of healthy open-source components
-* Implement their own home made solution through good specs, and solid
- extensions mechanism.
-
-## More about Registry 2.0
-
-The new registry implementation provides the following benefits:
-
-- faster push and pull
-- new, more efficient implementation
-- simplified deployment
-- pluggable storage backend
-- webhook notifications
-
-For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
-
-### Who needs to deploy a registry?
-
-By default, Docker users pull images from Docker's public registry instance.
-[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
-ability. Users can also push images to a repository on Docker's public registry,
-if they have a [Docker Hub](https://hub.docker.com/) account.
-
-For some users and even companies, this default behavior is sufficient. For
-others, it is not.
-
-For example, users with their own software products may want to maintain a
-registry for private, company images. Also, you may wish to deploy your own
-image repository for images used to test or in continuous integration. For these
-use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
-may be the better choice.
-
-### Migration to Registry 2.0
-
-For those who have previously deployed their own registry based on the Registry
-1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
-data migration is required. A tool to assist with migration efforts has been
-created. For more information see [docker/migrator](https://github.com/docker/migrator).
-
-## Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
-issues, fixes, and patches to this project. If you are contributing code, see
-the instructions for [building a development environment](BUILDING.md).
-
-## Support
-
-If any issues are encountered while using the _Distribution_ project, several
-avenues are available for support:
-
-
-
-
- IRC
-
-
- #docker-distribution on FreeNode
-
-
-
-
- Issue Tracker
-
-
- github.com/docker/distribution/issues
-
-
-
-
- Google Groups
-
-
- https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-
-
-
-
- Mailing List
-
-
- docker@dockerproject.org
-
-
-
-
-
-## License
-
-This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md
deleted file mode 100644
index 701127afe..000000000
--- a/vendor/github.com/docker/distribution/ROADMAP.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# Roadmap
-
-The Distribution Project consists of several components, some of which are
-still being defined. This document defines the high-level goals of the
-project, identifies the current components, and defines the release-
-relationship to the Docker Platform.
-
-* [Distribution Goals](#distribution-goals)
-* [Distribution Components](#distribution-components)
-* [Project Planning](#project-planning): release-relationship to the Docker Platform.
-
-This road map is a living document, providing an overview of the goals and
-considerations made in respect of the future of the project.
-
-## Distribution Goals
-
-- Replace the existing [docker registry](github.com/docker/docker-registry)
- implementation as the primary implementation.
-- Replace the existing push and pull code in the docker engine with the
- distribution package.
-- Define a strong data model for distributing docker images
-- Provide a flexible distribution tool kit for use in the docker platform
-- Unlock new distribution models
-
-## Distribution Components
-
-Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
-features and bugfixes for a component will be added to the relevant milestone. If a feature or
-bugfix is not part of a milestone, it is currently unscheduled for
-implementation.
-
-* [Registry](#registry)
-* [Distribution Package](#distribution-package)
-
-***
-
-### Registry
-
-The new Docker registry is the main portion of the distribution repository.
-Registry 2.0 is the first release of the next-generation registry. This was
-primarily focused on implementing the [new registry
-API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
-with a focus on security and performance.
-
-Following from the Distribution project goals above, we have a set of goals
-for registry v2 that we would like to follow in the design. New features
-should be compared against these goals.
-
-#### Data Storage and Distribution First
-
-The registry's first goal is to provide a reliable, consistent storage
-location for Docker images. The registry should only provide the minimal
-amount of indexing required to fetch image data and no more.
-
-This means we should be selective in new features and API additions, including
-those that may require expensive, ever growing indexes. Requests should be
-servable in "constant time".
-
-#### Content Addressability
-
-All data objects used in the registry API should be content addressable.
-Content identifiers should be secure and verifiable. This provides a secure,
-reliable base from which to build more advanced content distribution systems.
-
-#### Content Agnostic
-
-In the past, changes to the image format would require large changes in Docker
-and the Registry. By decoupling the distribution and image format, we can
-allow the formats to progress without having to coordinate between the two.
-This means that we should be focused on decoupling Docker from the registry
-just as much as decoupling the registry from Docker. Such an approach will
-allow us to unlock new distribution models that haven't been possible before.
-
-We can take this further by saying that the new registry should be content
-agnostic. The registry provides a model of names, tags, manifests and content
-addresses and that model can be used to work with content.
-
-#### Simplicity
-
-The new registry should be closer to a microservice component than its
-predecessor. This means it should have a narrower API and a low number of
-service dependencies. It should be easy to deploy.
-
-This means that other solutions should be explored before changing the API or
-adding extra dependencies. If functionality is required, can it be added as an
-extension or companion service?
-
-#### Extensibility
-
-The registry should keep its scope narrow while providing extension points to
-add functionality.
-
-Features like search, indexing, synchronization and registry explorers fall
-into this category. No such feature should be added unless we've found it
-impossible to do through an extension.
-
-#### Active Feature Discussions
-
-The following are feature discussions that are currently active.
-
-If you don't see your favorite, unimplemented feature, feel free to contact us
-via IRC or the mailing list and we can talk about adding it. The goal here is
-to make sure that new features go through a rigorous design process before
-landing in the registry.
-
-##### Proxying to other Registries
-
-A _pull-through caching_ mode exists for the registry, but is restricted from
-within the docker client to only mirror the official Docker Hub. This functionality
-can be expanded when image provenance has been specified and implemented in the
-distribution project.
-
-##### Metadata storage
-
-Metadata for the registry is currently stored with the manifest and layer data on
-the storage backend. While this is a big win for simplicity and for reliably
-maintaining state, it comes at the cost of consistency and higher latency. The
-mutable registry metadata operations should be abstracted behind an API which
-will allow ACID-compliant storage systems to handle metadata.
-
-##### Peer to Peer transfer
-
-Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
-
-##### Indexing, Search and Discovery
-
-The original registry provided some implementation of search for use with
-private registries. Support has been elided from V2 since we'd like to
-decouple search functionality from the registry. This makes the registry
-simpler to deploy, especially in use cases where search is not needed, and
-lets us decouple the image format from the registry.
-
-There are explorations into using the catalog API and notification system to
-build external indexes. The current line of thought is that we will define a
-common search API to index and query docker images. Such a system could be run
-as a companion to a registry or set of registries to power discovery.
-
-The main issue with search and discovery is that there are so many ways to
-accomplish it. There are two aspects to this project. The first is deciding on
-how it will be done, including an API definition that can work with changing
-data formats. The second is the process of integrating with `docker search`.
-We expect that someone will attempt to address the problem with the existing
-tools and propose the result as a standard search API, or use it to inform a
-standardization process. Once this has been explored, we will integrate with
-the docker client.
-
-Please see the following for more detail:
-
-- https://github.com/docker/distribution/issues/206
-
-##### Deletes
-
-> __NOTE:__ Deletes are a frequently requested feature. Before requesting this
-feature or participating in discussion, we ask that you read this section in
-full and understand the problems behind deletes.
-
-While, at first glance, implementing deletes seems simple, there are a number
-of complicating factors that make many solutions not ideal or even
-pathological in the context of a registry. The following paragraphs discuss
-the background and approaches that could be applied to arrive at a solution.
-
-The goal of deletes in any system is to remove unused or unneeded data. Only
-data requested for deletion should be removed and no other data. Removing
-unintended data is worse than _not_ removing data that was requested for
-removal, but ideally both are supported. Generally, according to this rule, we
-err on holding data longer than needed, ensuring that it is only removed when
-we can be certain that it can be removed. With the current behavior, we opt to
-hold onto the data forever, ensuring that data cannot be incorrectly removed.
-
-To understand the problems with implementing deletes, one must understand the
-data model. All registry data is stored in a filesystem layout, implemented on
-a "storage driver", effectively a _virtual file system_ (VFS). The storage
-system must assume that this VFS layer will be eventually consistent and has
-poor read-after-write consistency, since this is the lowest common denominator
-among the storage drivers. This is mitigated by writing values in
-reverse-dependent order, but it makes wider transactional operations unsafe.
-
-Layered on the VFS model is a content-addressable _directed, acyclic graph_
-(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
-Since the same data can be referenced by multiple manifests, we only store
-data once, even if it is in different repositories. Thus, we have a set of
-blobs, referenced by tags and manifests. If we want to delete a blob we need
-to be certain that it is no longer referenced by another manifest or tag. When
-we delete a manifest, we can also try to delete the referenced blobs. Deciding
-whether or not a blob has an active reference is the crux of the problem.
-
-Conceptually, deleting a manifest and its resources is quite simple. Just find
-all the manifests, enumerate the referenced blobs and delete the blobs not in
-that set. An astute observer will recognize this as a garbage collection
-problem. As with garbage collection in programming languages, this is very
-simple when one always has a consistent view. When one adds parallelism and an
-inconsistent view of data, it becomes very challenging.
-
-A simple example can demonstrate this. Let's say we are deleting a manifest
-_A_ in one process. We scan the manifest and decide that all the blobs are
-ready for deletion. Concurrently, we have another process accepting a new
-manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
-is accepted and all the blobs are considered present, so the operation
-proceeds. The original process then deletes the referenced blobs, assuming
-they were unreferenced. The manifest _B_, which we thought had all of its data
-present, can no longer be served by the registry, since the dependent data has
-been deleted.
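-
-To make the naive approach concrete, here is a deliberately simplified
-mark-and-sweep sketch (not part of the original document; the `Store`
-interface is invented for illustration). It inherits exactly the race
-described above, since nothing stops a manifest from being accepted while the
-sweep runs:
-
-```go
-package gcsketch
-
-import "github.com/opencontainers/go-digest"
-
-// Store is a hypothetical view of the registry's data for this sketch.
-type Store interface {
-	Manifests() [][]digest.Digest // blobs referenced by each manifest
-	Blobs() []digest.Digest       // every blob currently stored
-	DeleteBlob(dgst digest.Digest) error
-}
-
-func markAndSweep(s Store) error {
-	// Mark: record every blob referenced by any manifest.
-	marked := make(map[digest.Digest]struct{})
-	for _, refs := range s.Manifests() {
-		for _, dgst := range refs {
-			marked[dgst] = struct{}{}
-		}
-	}
-	// Sweep: delete everything unmarked. Unsafe under concurrent writes.
-	for _, dgst := range s.Blobs() {
-		if _, ok := marked[dgst]; !ok {
-			if err := s.DeleteBlob(dgst); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-```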
-
-Deleting data from the registry safely requires some way to coordinate this
-operation. The following approaches are being considered:
-
-- _Reference Counting_ - Maintain a count of references to each blob. This is
-  challenging for two reasons: 1. maintaining a consistent consensus of
-  reference counts across a set of registries and 2. building the initial
-  list of reference counts for an existing registry. These challenges can be
-  met with a consensus protocol like Paxos or Raft in the first case and a
-  necessary but simple scan in the second (see the sketch after this list).
-- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
- and find all blob references. Delete all unreferenced blobs. This approach
- is very simple but requires disabling writes for a period of time while the
- service reads all data. This is slow and expensive but very accurate and
- effective.
-- _Generational GC_ - Do something similar to above but instead of blocking
- writes, writes are sent to another storage backend while reads are broadcast
- to the new and old backends. GC is then performed on the read-only portion.
- Because writes land in the new backend, the data in the read-only section
- can be safely deleted. The main drawbacks of this approach are complexity
- and coordination.
-- _Centralized Oracle_ - Using a centralized, transactional database, we can
- know exactly which data is referenced at any given time. This avoids
- the coordination problem by managing this data in a single location. We trade
- off metadata scalability for simplicity and performance. This is a very good
- option for most registry deployments. This would create a bottleneck for
- registry metadata. However, metadata is generally not the main bottleneck
- when serving images.
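-
-A single-process sketch of the _Reference Counting_ idea (not part of the
-original document; the hard part, consensus across a set of registries, is
-deliberately elided):
-
-```go
-package refcount
-
-import (
-	"sync"
-
-	"github.com/opencontainers/go-digest"
-)
-
-type refCounter struct {
-	mu     sync.Mutex
-	counts map[digest.Digest]int
-}
-
-func newRefCounter() *refCounter {
-	return &refCounter{counts: make(map[digest.Digest]int)}
-}
-
-// addManifest records one reference for each blob the manifest uses.
-func (rc *refCounter) addManifest(refs []digest.Digest) {
-	rc.mu.Lock()
-	defer rc.mu.Unlock()
-	for _, dgst := range refs {
-		rc.counts[dgst]++
-	}
-}
-
-// removeManifest drops one reference per blob and returns the blobs whose
-// count reached zero, i.e. the deletion candidates.
-func (rc *refCounter) removeManifest(refs []digest.Digest) []digest.Digest {
-	rc.mu.Lock()
-	defer rc.mu.Unlock()
-	var unreferenced []digest.Digest
-	for _, dgst := range refs {
-		rc.counts[dgst]--
-		if rc.counts[dgst] <= 0 {
-			delete(rc.counts, dgst)
-			unreferenced = append(unreferenced, dgst)
-		}
-	}
-	return unreferenced
-}
-```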
-
-Please let us know if other solutions exist that we have yet to enumerate.
-Note that for any approach, implementation is a massive consideration. For
-example, a mark-sweep based solution may seem simple, but the amount of work in
-coordination may offset the extra work it would take to build a _Centralized
-Oracle_. We'll accept proposals for any solution but please coordinate with us
-before dropping code.
-
-At this time, we have traded off simplicity and ease of deployment for disk
-space. Simplicity and ease of deployment tend to reduce developer involvement,
-which is currently the most expensive resource in software engineering. Taking
-on any solution for deletes will greatly affect these factors, trading off
-very cheap disk space for a complex deployment and operational story.
-
-Please see the following issues for more detail:
-
-- https://github.com/docker/distribution/issues/422
-- https://github.com/docker/distribution/issues/461
-- https://github.com/docker/distribution/issues/462
-
-### Distribution Package
-
-At its core, the Distribution Project is a set of Go packages that make up
-Distribution Components. At this time, most of these packages make up the
-Registry implementation.
-
-The package itself is considered unstable. If you're using it, please take care to vendor the version you depend on.
-
-For feature additions, please see the Registry section. In the future, we may break out a
-separate Roadmap for distribution-specific features that apply to more than
-just the registry.
-
-***
-
-### Project Planning
-
-An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
-
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
deleted file mode 100644
index c0e9261be..000000000
--- a/vendor/github.com/docker/distribution/blobs.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package distribution
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "time"
-
- "github.com/docker/distribution/reference"
- "github.com/opencontainers/go-digest"
- "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-var (
- // ErrBlobExists is returned when the blob already exists.
- ErrBlobExists = errors.New("blob exists")
-
- // ErrBlobDigestUnsupported is returned when the blob digest is an
- // unsupported version.
- ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
-
- // ErrBlobUnknown is returned when the blob is not found.
- ErrBlobUnknown = errors.New("unknown blob")
-
- // ErrBlobUploadUnknown is returned when the upload is not found.
- ErrBlobUploadUnknown = errors.New("blob upload unknown")
-
- // ErrBlobInvalidLength is returned when the blob's length on commit does
- // not match the descriptor or is an invalid value.
- ErrBlobInvalidLength = errors.New("blob invalid length")
-)
-
-// ErrBlobInvalidDigest is returned when the digest check fails.
-type ErrBlobInvalidDigest struct {
- Digest digest.Digest
- Reason error
-}
-
-func (err ErrBlobInvalidDigest) Error() string {
- return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
- err.Digest, err.Reason)
-}
-
-// ErrBlobMounted is returned when a blob is mounted from another repository
-// instead of initiating an upload session.
-type ErrBlobMounted struct {
- From reference.Canonical
- Descriptor Descriptor
-}
-
-func (err ErrBlobMounted) Error() string {
- return fmt.Sprintf("blob mounted from: %v to: %v",
- err.From, err.Descriptor)
-}
-
-// Descriptor describes targeted content. Used in conjunction with a blob
-// store, a descriptor can be used to fetch, store and target any kind of
-// blob. The struct also describes the wire protocol format. Fields should
-// only be added but never changed.
-type Descriptor struct {
- // MediaType describes the type of the content. All text-based formats are
- // encoded as utf-8.
- MediaType string `json:"mediaType,omitempty"`
-
- // Size in bytes of content.
- Size int64 `json:"size,omitempty"`
-
- // Digest uniquely identifies the content. A byte stream can be verified
- // against this digest.
- Digest digest.Digest `json:"digest,omitempty"`
-
- // URLs contains the source URLs of this content.
- URLs []string `json:"urls,omitempty"`
-
- // Annotations contains arbitrary metadata relating to the targeted content.
- Annotations map[string]string `json:"annotations,omitempty"`
-
- // Platform describes the platform which the image in the manifest runs on.
- // This should only be used when referring to a manifest.
- Platform *v1.Platform `json:"platform,omitempty"`
-
- // NOTE: Before adding a field here, please ensure that all
- // other options have been exhausted. Much of the type relationships
- // depend on the simplicity of this type.
-}
-
-// Descriptor returns the descriptor, to make it satisfy the Describable
-// interface. Note that implementations of Describable are generally objects
-// which can be described, not simply descriptors; this exception is in place
-// to make it more convenient to pass actual descriptors to functions that
-// expect Describable objects.
-func (d Descriptor) Descriptor() Descriptor {
- return d
-}
-
-// BlobStatter makes blob descriptors available by digest. The service may
-// provide a descriptor of a different digest if the provided digest is not
-// canonical.
-type BlobStatter interface {
- // Stat provides metadata about a blob identified by the digest. If the
- // blob is unknown to the describer, ErrBlobUnknown will be returned.
- Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
-}
-
-// BlobDeleter enables deleting blobs from storage.
-type BlobDeleter interface {
- Delete(ctx context.Context, dgst digest.Digest) error
-}
-
-// BlobEnumerator enables iterating over blobs from storage
-type BlobEnumerator interface {
- Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
-}
-
-// BlobDescriptorService manages metadata about a blob by digest. Most
-// implementations will not expose such an interface explicitly. Such mappings
-// should be maintained by interacting with the BlobIngester. Hence, this is
-// left off of BlobService and BlobStore.
-type BlobDescriptorService interface {
- BlobStatter
-
- // SetDescriptor assigns the descriptor to the digest. The provided digest and
- // the digest in the descriptor must map to identical content but they may
- // differ on their algorithm. The descriptor must have the canonical
- // digest of the content and the digest algorithm must match the
- // annotator's canonical algorithm.
- //
- // Such a facility can be used to map blobs between digest domains, with
- // the restriction that the algorithm of the descriptor must match the
- // canonical algorithm (i.e. sha256) of the annotator.
- SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
-
- // Clear enables descriptors to be unlinked
- Clear(ctx context.Context, dgst digest.Digest) error
-}
-
-// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
-type BlobDescriptorServiceFactory interface {
- BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
-}
-
-// ReadSeekCloser is the primary reader type for blob data, combining
-// io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// BlobProvider describes operations for getting blob data.
-type BlobProvider interface {
- // Get returns the entire blob identified by digest along with the descriptor.
- Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
-
- // Open provides a ReadSeekCloser to the blob identified by the provided
- // descriptor. If the blob is not known to the service, an error will be
- // returned.
- Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
-}
-
-// BlobServer can serve blobs via http.
-type BlobServer interface {
- // ServeBlob attempts to serve the blob, identified by dgst, via http. The
- // service may decide to redirect the client elsewhere or serve the data
- // directly.
- //
- // This handler only issues successful responses, such as 2xx or 3xx,
- // meaning it serves data or issues a redirect. If the blob is not
- // available, an error will be returned and the caller may still issue a
- // response.
- //
- // The implementation may serve the same blob from a different digest
- // domain. The appropriate headers will be set for the blob, unless they
- // have already been set by the caller.
- ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
-}
-
-// BlobIngester ingests blob data.
-type BlobIngester interface {
- // Put inserts the content p into the blob service, returning a descriptor
- // or an error.
- Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
-
- // Create allocates a new blob writer to add a blob to this service. The
- // returned handle can be written to and later resumed using an opaque
- // identifier. With this approach, one can Close and Resume a BlobWriter
- // multiple times until the BlobWriter is committed or cancelled.
- Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
-
- // Resume attempts to resume a write to a blob, identified by an id.
- Resume(ctx context.Context, id string) (BlobWriter, error)
-}
-
-// BlobCreateOption is a general extensible function argument for blob creation
-// methods. A BlobIngester may choose to honor any or none of the given
-// BlobCreateOptions, which can be specific to the implementation of the
-// BlobIngester receiving them.
-// TODO (brianbland): unify this with ManifestServiceOption in the future
-type BlobCreateOption interface {
- Apply(interface{}) error
-}
-
-// CreateOptions is a collection of blob creation modifiers relevant to general
-// blob storage intended to be configured by the BlobCreateOption.Apply method.
-type CreateOptions struct {
- Mount struct {
- ShouldMount bool
- From reference.Canonical
- // Stat allows to pass precalculated descriptor to link and return.
- // Blob access check will be skipped if set.
- Stat *Descriptor
- }
-}
-
-// BlobWriter provides a handle for inserting data into a blob store.
-// Instances should be obtained from BlobWriteService.Writer and
-// BlobWriteService.Resume. If supported by the store, a writer can be
-// recovered with the id.
-type BlobWriter interface {
- io.WriteCloser
- io.ReaderFrom
-
- // Size returns the number of bytes written to this blob.
- Size() int64
-
- // ID returns the identifier for this writer. The ID can be used with the
- // Blob service to later resume the write.
- ID() string
-
- // StartedAt returns the time this blob write was started.
- StartedAt() time.Time
-
- // Commit completes the blob writer process. The content is verified
- // against the provided provisional descriptor, which may result in an
- // error. Depending on the implementation, written data may be validated
- // against the provisional descriptor fields. If MediaType is not present,
- // the implementation may reject the commit or assign
- // "application/octet-stream" to the blob. The returned descriptor may
- // have a different digest depending on the blob store, referred to as
- // the canonical descriptor.
- Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
-
- // Cancel ends the blob write without storing any data and frees any
- // associated resources. Any data written thus far will be lost. Cancel
- // implementations should allow multiple calls even after a commit that
- // result in a no-op. This allows use of Cancel in a defer statement,
- // increasing the assurance that it is correctly called.
- Cancel(ctx context.Context) error
-}
-
-// BlobService combines the operations to access, read and write blobs. This
-// can be used to describe remote blob services.
-type BlobService interface {
- BlobStatter
- BlobProvider
- BlobIngester
-}
-
-// BlobStore represent the entire suite of blob related operations. Such an
-// implementation can access, read, write, delete and serve blobs.
-type BlobStore interface {
- BlobService
- BlobServer
- BlobDeleter
-}
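-
-// Illustrative only (not part of the original file): the intended ingest
-// flow is Create -> Write -> Commit, with Cancel deferred so an aborted
-// upload releases its resources. The upload helper below is hypothetical.
-//
-//	func upload(ctx context.Context, blobs BlobIngester, p []byte) (Descriptor, error) {
-//		bw, err := blobs.Create(ctx)
-//		if err != nil {
-//			return Descriptor{}, err
-//		}
-//		defer bw.Cancel(ctx) // documented to be a no-op after Commit
-//		if _, err := bw.Write(p); err != nil {
-//			return Descriptor{}, err
-//		}
-//		return bw.Commit(ctx, Descriptor{
-//			MediaType: "application/octet-stream",
-//			Size:      int64(len(p)),
-//			Digest:    digest.FromBytes(p),
-//		})
-//	}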
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dca7..000000000
--- a/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
- "errors"
- "sort"
- "strings"
- "sync"
-
- digest "github.com/opencontainers/go-digest"
-)
-
-var (
- // ErrDigestNotFound is used when a matching digest
- // could not be found in a set.
- ErrDigestNotFound = errors.New("digest not found")
-
- // ErrDigestAmbiguous is used when multiple digests
- // are found in a set. None of the matching digests
- // should be considered valid matches.
- ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by a string representation of the
-// digest as well as by a short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
- mutex sync.RWMutex
- entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
- return &Set{
- entries: digestEntries{},
- }
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
- if len(hex) == len(shortHex) {
- if hex != shortHex {
- return false
- }
- if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- } else if !strings.HasPrefix(hex, shortHex) {
- return false
- } else if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- if len(dst.entries) == 0 {
- return "", ErrDigestNotFound
- }
- var (
- searchFunc func(int) bool
- alg digest.Algorithm
- hex string
- )
- dgst, err := digest.Parse(d)
- if err == digest.ErrDigestInvalidFormat {
- hex = d
- searchFunc = func(i int) bool {
- return dst.entries[i].val >= d
- }
- } else {
- hex = dgst.Hex()
- alg = dgst.Algorithm()
- searchFunc = func(i int) bool {
- if dst.entries[i].val == hex {
- return dst.entries[i].alg >= alg
- }
- return dst.entries[i].val >= hex
- }
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
- return "", ErrDigestNotFound
- }
- if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
- return dst.entries[idx].digest, nil
- }
- if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
- return "", ErrDigestAmbiguous
- }
-
- return dst.entries[idx].digest, nil
-}
-
-// Add adds the given digest to the set. An error will be returned
-// if the given digest is invalid. If the digest already exists in the
-// set, this operation will be a no-op.
-func (dst *Set) Add(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) {
- dst.entries = append(dst.entries, entry)
- return nil
- } else if dst.entries[idx].digest == d {
- return nil
- }
-
- entries := append(dst.entries, nil)
- copy(entries[idx+1:], entries[idx:len(entries)-1])
- entries[idx] = entry
- dst.entries = entries
- return nil
-}
-
-// Remove removes the given digest from the set. An err will be
-// returned if the given digest is invalid. If the digest does
-// not exist in the set, this operation will be a no-op.
-func (dst *Set) Remove(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- // Not found if idx is after or value at idx is not digest
- if idx == len(dst.entries) || dst.entries[idx].digest != d {
- return nil
- }
-
- entries := dst.entries
- copy(entries[idx:], entries[idx+1:])
- entries = entries[:len(entries)-1]
- dst.entries = entries
-
- return nil
-}
-
-// All returns all the digests in the set
-func (dst *Set) All() []digest.Digest {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- retValues := make([]digest.Digest, len(dst.entries))
- for i := range dst.entries {
- retValues[i] = dst.entries[i].digest
- }
-
- return retValues
-}
-
-// ShortCodeTable returns a map of Digest to unique short codes. The
-// length represents the minimum value, the maximum length may be the
-// entire value of digest if uniqueness cannot be achieved without the
-// full value. This function will attempt to make short codes as short
-// as possible to be unique.
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- m := make(map[digest.Digest]string, len(dst.entries))
- l := length
- resetIdx := 0
- for i := 0; i < len(dst.entries); i++ {
- var short string
- extended := true
- for extended {
- extended = false
- if len(dst.entries[i].val) <= l {
- short = dst.entries[i].digest.String()
- } else {
- short = dst.entries[i].val[:l]
- for j := i + 1; j < len(dst.entries); j++ {
- if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
- if j > resetIdx {
- resetIdx = j
- }
- extended = true
- } else {
- break
- }
- }
- if extended {
- l++
- }
- }
- }
- m[dst.entries[i].digest] = short
- if i >= resetIdx {
- l = length
- }
- }
- return m
-}
-
-type digestEntry struct {
- alg digest.Algorithm
- val string
- digest digest.Digest
-}
-
-type digestEntries []*digestEntry
-
-func (d digestEntries) Len() int {
- return len(d)
-}
-
-func (d digestEntries) Less(i, j int) bool {
- if d[i].val != d[j].val {
- return d[i].val < d[j].val
- }
- return d[i].alg < d[j].alg
-}
-
-func (d digestEntries) Swap(i, j int) {
- d[i], d[j] = d[j], d[i]
-}
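-
-// Illustrative only (not part of the original file): resolving a short
-// prefix against a Set. The digest values dgstA and dgstB are assumed to
-// have been parsed elsewhere.
-//
-//	ds := NewSet()
-//	_ = ds.Add(dgstA)
-//	_ = ds.Add(dgstB)
-//	switch d, err := ds.Lookup("9f86d0"); err {
-//	case nil:
-//		_ = d // exactly one digest matched the prefix
-//	case ErrDigestNotFound:
-//		// no digest matched
-//	case ErrDigestAmbiguous:
-//		// the prefix matched more than one digest
-//	}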
diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go
deleted file mode 100644
index bdd8cb708..000000000
--- a/vendor/github.com/docker/distribution/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package distribution will define the interfaces for the components of
-// docker distribution. The goal is to allow users to reliably package, ship
-// and store content related to docker images.
-//
-// This is currently a work in progress. More details are available in the
-// README.md.
-package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
deleted file mode 100644
index 8e0b788d6..000000000
--- a/vendor/github.com/docker/distribution/errors.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package distribution
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-// ErrAccessDenied is returned when an access to a requested resource is
-// denied.
-var ErrAccessDenied = errors.New("access denied")
-
-// ErrManifestNotModified is returned when a conditional manifest GetByTag
-// returns nil due to the client indicating it has the latest version
-var ErrManifestNotModified = errors.New("manifest not modified")
-
-// ErrUnsupported is returned when an unimplemented or unsupported action is
-// performed
-var ErrUnsupported = errors.New("operation unsupported")
-
-// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
-// manifest but the registry is configured to reject it
-var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
-
-// ErrTagUnknown is returned if the given tag is not known by the tag service
-type ErrTagUnknown struct {
- Tag string
-}
-
-func (err ErrTagUnknown) Error() string {
- return fmt.Sprintf("unknown tag=%s", err.Tag)
-}
-
-// ErrRepositoryUnknown is returned if the named repository is not known by
-// the registry.
-type ErrRepositoryUnknown struct {
- Name string
-}
-
-func (err ErrRepositoryUnknown) Error() string {
- return fmt.Sprintf("unknown repository name=%s", err.Name)
-}
-
-// ErrRepositoryNameInvalid should be used to denote an invalid repository
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrRepositoryNameInvalid struct {
- Name string
- Reason error
-}
-
-func (err ErrRepositoryNameInvalid) Error() string {
- return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
-}
-
-// ErrManifestUnknown is returned if the manifest is not known by the
-// registry.
-type ErrManifestUnknown struct {
- Name string
- Tag string
-}
-
-func (err ErrManifestUnknown) Error() string {
- return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
-}
-
-// ErrManifestUnknownRevision is returned when a manifest cannot be found by
-// revision within a repository.
-type ErrManifestUnknownRevision struct {
- Name string
- Revision digest.Digest
-}
-
-func (err ErrManifestUnknownRevision) Error() string {
- return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
-}
-
-// ErrManifestUnverified is returned when the registry is unable to verify
-// the manifest.
-type ErrManifestUnverified struct{}
-
-func (ErrManifestUnverified) Error() string {
- return "unverified manifest"
-}
-
-// ErrManifestVerification provides a type to collect errors encountered
-// during manifest verification. Currently, it accepts errors of all types,
-// but it may be narrowed to those involving manifest verification.
-type ErrManifestVerification []error
-
-func (errs ErrManifestVerification) Error() string {
- var parts []string
- for _, err := range errs {
- parts = append(parts, err.Error())
- }
-
- return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
-}
-
-// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
-type ErrManifestBlobUnknown struct {
- Digest digest.Digest
-}
-
-func (err ErrManifestBlobUnknown) Error() string {
- return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
-}
-
-// ErrManifestNameInvalid should be used to denote an invalid manifest
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrManifestNameInvalid struct {
- Name string
- Reason error
-}
-
-func (err ErrManifestNameInvalid) Error() string {
- return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
-}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
deleted file mode 100644
index 1816baea1..000000000
--- a/vendor/github.com/docker/distribution/manifests.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package distribution
-
-import (
- "context"
- "fmt"
- "mime"
-
- "github.com/opencontainers/go-digest"
-)
-
-// Manifest represents a registry object specifying a set of
-// references and an optional target
-type Manifest interface {
- // References returns a list of objects which make up this manifest.
- // A reference is anything which can be represented by a
- // distribution.Descriptor. These can consist of layers, resources or other
- // manifests.
- //
- // While no particular order is required, implementations should return
- // them from highest to lowest priority. For example, one might want to
- // return the base layer before the top layer.
- References() []Descriptor
-
- // Payload provides the serialized format of the manifest, in addition to
- // the media type.
- Payload() (mediaType string, payload []byte, err error)
-}
-
-// ManifestBuilder creates a manifest allowing one to include dependencies.
-// Instances can be obtained from a version-specific manifest package. Manifest
-// specific data is passed into the function which creates the builder.
-type ManifestBuilder interface {
- // Build creates the manifest from this builder.
- Build(ctx context.Context) (Manifest, error)
-
- // References returns a list of objects which have been added to this
- // builder. The dependencies are returned in the order they were added,
- // which should be from base to head.
- References() []Descriptor
-
- // AppendReference includes the given object in the manifest after any
- // existing dependencies. If the add fails, such as when adding an
- // unsupported dependency, an error may be returned.
- //
- // The destination of the reference is dependent on the manifest type and
- // the dependency type.
- AppendReference(dependency Describable) error
-}
-
-// ManifestService describes operations on image manifests.
-type ManifestService interface {
- // Exists returns true if the manifest exists.
- Exists(ctx context.Context, dgst digest.Digest) (bool, error)
-
- // Get retrieves the manifest specified by the given digest
- Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
-
- // Put creates or updates the given manifest returning the manifest digest
- Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
-
- // Delete removes the manifest specified by the given digest. Deleting
- // a manifest that doesn't exist will return ErrManifestNotFound
- Delete(ctx context.Context, dgst digest.Digest) error
-}
-
-// ManifestEnumerator enables iterating over manifests
-type ManifestEnumerator interface {
- // Enumerate calls ingester for each manifest.
- Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
-}
-
-// Describable is an interface for descriptors
-type Describable interface {
- Descriptor() Descriptor
-}
-
-// ManifestMediaTypes returns the supported media types for manifests.
-func ManifestMediaTypes() (mediaTypes []string) {
- for t := range mappings {
- if t != "" {
- mediaTypes = append(mediaTypes, t)
- }
- }
- return
-}
-
-// UnmarshalFunc implements manifest unmarshalling for a given MediaType.
-type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
-
-var mappings = make(map[string]UnmarshalFunc)
-
-// UnmarshalManifest looks up manifest unmarshal functions based on
-// MediaType
-func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
- // Need to look up by the actual media type, not the raw contents of
- // the header. Strip semicolons and anything following them.
- var mediaType string
- if ctHeader != "" {
- var err error
- mediaType, _, err = mime.ParseMediaType(ctHeader)
- if err != nil {
- return nil, Descriptor{}, err
- }
- }
-
- unmarshalFunc, ok := mappings[mediaType]
- if !ok {
- unmarshalFunc, ok = mappings[""]
- if !ok {
- return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
- }
- }
-
- return unmarshalFunc(p)
-}
-
-// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
-// This should be called from the init function of a version-specific
-// manifest package.
-func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
- if _, ok := mappings[mediaType]; ok {
- return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
- }
- mappings[mediaType] = u
- return nil
-}
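-
-// Illustrative only (not part of the original file): a version-specific
-// manifest package would typically register its UnmarshalFunc at init time,
-// after which UnmarshalManifest can dispatch on the Content-Type header.
-// The media type and exampleManifest type below are hypothetical, and
-// encoding/json is assumed to be imported.
-//
-//	func init() {
-//		err := RegisterManifestSchema(
-//			"application/vnd.example.manifest.v1+json",
-//			func(b []byte) (Manifest, Descriptor, error) {
-//				m := new(exampleManifest) // hypothetical Manifest implementation
-//				if err := json.Unmarshal(b, m); err != nil {
-//					return nil, Descriptor{}, err
-//				}
-//				desc := Descriptor{
-//					MediaType: "application/vnd.example.manifest.v1+json",
-//					Size:      int64(len(b)),
-//					Digest:    digest.FromBytes(b),
-//				}
-//				return m, desc, nil
-//			})
-//		if err != nil {
-//			panic(err)
-//		}
-//	}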
diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go
deleted file mode 100644
index b5a532144..000000000
--- a/vendor/github.com/docker/distribution/metrics/prometheus.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package metrics
-
-import "github.com/docker/go-metrics"
-
-const (
- // NamespacePrefix is the namespace of prometheus metrics
- NamespacePrefix = "registry"
-)
-
-var (
- // StorageNamespace is the prometheus namespace of blob/cache related operations
- StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
-)
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
deleted file mode 100644
index 978df7eab..000000000
--- a/vendor/github.com/docker/distribution/reference/helpers.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package reference
-
-import "path"
-
-// IsNameOnly returns true if reference only contains a repo name.
-func IsNameOnly(ref Named) bool {
- if _, ok := ref.(NamedTagged); ok {
- return false
- }
- if _, ok := ref.(Canonical); ok {
- return false
- }
- return true
-}
-
-// FamiliarName returns the familiar name string
-// for the given named, familiarizing if needed.
-func FamiliarName(ref Named) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().Name()
- }
- return ref.Name()
-}
-
-// FamiliarString returns the familiar string representation
-// for the given reference, familiarizing if needed.
-func FamiliarString(ref Reference) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().String()
- }
- return ref.String()
-}
-
-// FamiliarMatch reports whether ref matches the specified pattern.
-// See https://godoc.org/path#Match for supported patterns.
-func FamiliarMatch(pattern string, ref Reference) (bool, error) {
- matched, err := path.Match(pattern, FamiliarString(ref))
- if namedRef, isNamed := ref.(Named); isNamed && !matched {
- matched, _ = path.Match(pattern, FamiliarName(namedRef))
- }
- return matched, err
-}
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
deleted file mode 100644
index 2d71fc5e9..000000000
--- a/vendor/github.com/docker/distribution/reference/normalize.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/docker/distribution/digestset"
- "github.com/opencontainers/go-digest"
-)
-
-var (
- legacyDefaultDomain = "index.docker.io"
- defaultDomain = "docker.io"
- officialRepoName = "library"
- defaultTag = "latest"
-)
-
-// normalizedNamed represents a name which has been
-// normalized and has a familiar form. A familiar name
-// is what is used in Docker UI. An example normalized
-// name is "docker.io/library/ubuntu" and corresponding
-// familiar name of "ubuntu".
-type normalizedNamed interface {
- Named
- Familiar() Named
-}
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier,
-// use ParseAnyReference.
-func ParseNormalizedNamed(s string) (Named, error) {
- if ok := anchoredIdentifierRegexp.MatchString(s); ok {
- return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
- }
- domain, remainder := splitDockerDomain(s)
- var remoteName string
- if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
- remoteName = remainder[:tagSep]
- } else {
- remoteName = remainder
- }
- if strings.ToLower(remoteName) != remoteName {
- return nil, errors.New("invalid reference format: repository name must be lowercase")
- }
-
- ref, err := Parse(domain + "/" + remainder)
- if err != nil {
- return nil, err
- }
- named, isNamed := ref.(Named)
- if !isNamed {
- return nil, fmt.Errorf("reference %s has no name", ref.String())
- }
- return named, nil
-}
-
-// splitDockerDomain splits a repository name into domain and remote-name
-// strings. If no valid domain is found, the default domain is used. The
-// repository name must already have been validated.
-func splitDockerDomain(name string) (domain, remainder string) {
- i := strings.IndexRune(name, '/')
- if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
- domain, remainder = defaultDomain, name
- } else {
- domain, remainder = name[:i], name[i+1:]
- }
- if domain == legacyDefaultDomain {
- domain = defaultDomain
- }
- if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
- remainder = officialRepoName + "/" + remainder
- }
- return
-}
-
-// familiarizeName returns a shortened version of the name familiar
-// to the Docker UI. Familiar names have the default domain
-// "docker.io" and the "library/" repository prefix removed.
-// For example, "docker.io/library/redis" will have the familiar
-// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
-// Returns a familiarized, name-only reference.
-func familiarizeName(named namedRepository) repository {
- repo := repository{
- domain: named.Domain(),
- path: named.Path(),
- }
-
- if repo.domain == defaultDomain {
- repo.domain = ""
- // Handle official repositories which have the pattern "library/"
- if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
- repo.path = split[1]
- }
- }
- return repo
-}
-
-func (r reference) Familiar() Named {
- return reference{
- namedRepository: familiarizeName(r.namedRepository),
- tag: r.tag,
- digest: r.digest,
- }
-}
-
-func (r repository) Familiar() Named {
- return familiarizeName(r)
-}
-
-func (t taggedReference) Familiar() Named {
- return taggedReference{
- namedRepository: familiarizeName(t.namedRepository),
- tag: t.tag,
- }
-}
-
-func (c canonicalReference) Familiar() Named {
- return canonicalReference{
- namedRepository: familiarizeName(c.namedRepository),
- digest: c.digest,
- }
-}
-
-// TagNameOnly adds the default tag "latest" to a reference if it only has
-// a repo name.
-func TagNameOnly(ref Named) Named {
- if IsNameOnly(ref) {
- namedTagged, err := WithTag(ref, defaultTag)
- if err != nil {
- // Default tag must be valid, to create a NamedTagged
- // type with non-validated input the WithTag function
- // should be used instead
- panic(err)
- }
- return namedTagged
- }
- return ref
-}
-
-// ParseAnyReference parses a reference string as a possible identifier,
-// full digest, or familiar name.
-func ParseAnyReference(ref string) (Reference, error) {
- if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
- return digestReference("sha256:" + ref), nil
- }
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
-
- return ParseNormalizedNamed(ref)
-}
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
- if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
- dgst, err := ds.Lookup(ref)
- if err == nil {
- return digestReference(dgst), nil
- }
- } else {
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
- }
-
- return ParseNormalizedNamed(ref)
-}
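-
-// Illustrative only (not part of the original file): normalization and
-// familiarization round-trip.
-//
-//	named, _ := ParseNormalizedNamed("ubuntu")
-//	named.Name()          // "docker.io/library/ubuntu"
-//	named = TagNameOnly(named)
-//	named.String()        // "docker.io/library/ubuntu:latest"
-//	FamiliarString(named) // "ubuntu:latest"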
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
deleted file mode 100644
index 2f66cca87..000000000
--- a/vendor/github.com/docker/distribution/reference/reference.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Package reference provides a general type to represent any way of referencing images within the registry.
-// Its main purpose is to abstract tags and digests (content-addressable hash).
-//
-// Grammar
-//
-// reference := name [ ":" tag ] [ "@" digest ]
-// name := [domain '/'] path-component ['/' path-component]*
-// domain := domain-component ['.' domain-component]* [':' port-number]
-// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
-// port-number := /[0-9]+/
-// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
-// separator := /[_.]|__|[-]*/
-//
-// tag := /[\w][\w.-]{0,127}/
-//
-// digest := digest-algorithm ":" digest-hex
-// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
-// digest-algorithm-separator := /[+.-_]/
-// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
-// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
-//
-// identifier := /[a-f0-9]{64}/
-// short-identifier := /[a-f0-9]{6,64}/
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-const (
- // NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
-)
-
-var (
- // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
- ErrReferenceInvalidFormat = errors.New("invalid reference format")
-
- // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
- ErrTagInvalidFormat = errors.New("invalid tag format")
-
- // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
- ErrDigestInvalidFormat = errors.New("invalid digest format")
-
- // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
- ErrNameContainsUppercase = errors.New("repository name must be lowercase")
-
- // ErrNameEmpty is returned for empty, invalid repository names.
- ErrNameEmpty = errors.New("repository name must have at least one component")
-
- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
-
- // ErrNameNotCanonical is returned when a name is not canonical.
- ErrNameNotCanonical = errors.New("repository name must be canonical")
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-type Reference interface {
- // String returns the full reference
- String() string
-}
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-type Field struct {
- reference Reference
-}
-
-// AsField wraps a reference in a Field for encoding.
-func AsField(reference Reference) Field {
- return Field{reference}
-}
-
-// Reference unwraps the reference type from the field to
-// return the Reference object. This object should be
-// of the appropriate type to further check for different
-// reference types.
-func (f Field) Reference() Reference {
- return f.reference
-}
-
-// MarshalText serializes the field to byte text which
-// is the string of the reference.
-func (f Field) MarshalText() (p []byte, err error) {
- return []byte(f.reference.String()), nil
-}
-
-// UnmarshalText parses text bytes by invoking the
-// reference parser to ensure the appropriately
-// typed reference object is wrapped by field.
-func (f *Field) UnmarshalText(p []byte) error {
- r, err := Parse(string(p))
- if err != nil {
- return err
- }
-
- f.reference = r
- return nil
-}
-
-// Named is an object with a full name
-type Named interface {
- Reference
- Name() string
-}
-
-// Tagged is an object which has a tag
-type Tagged interface {
- Reference
- Tag() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
- Named
- Tag() string
-}
-
-// Digested is an object which has a digest
-// by which it can be referenced
-type Digested interface {
- Reference
- Digest() digest.Digest
-}
-
-// Canonical reference is an object with a fully unique
-// name, including a domain and digest
-type Canonical interface {
- Named
- Digest() digest.Digest
-}
-
-// namedRepository is a reference to a repository with a name.
-// A namedRepository has both domain and path components.
-type namedRepository interface {
- Named
- Domain() string
- Path() string
-}
-
-// Domain returns the domain part of the Named reference
-func Domain(named Named) string {
- if r, ok := named.(namedRepository); ok {
- return r.Domain()
- }
- domain, _ := splitDomain(named.Name())
- return domain
-}
-
-// Path returns the name without the domain part of the Named reference
-func Path(named Named) (name string) {
- if r, ok := named.(namedRepository); ok {
- return r.Path()
- }
- _, path := splitDomain(named.Name())
- return path
-}
-
-func splitDomain(name string) (string, string) {
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if len(match) != 3 {
- return "", name
- }
- return match[1], match[2]
-}
-
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-// DEPRECATED: Use Domain or Path
-func SplitHostname(named Named) (string, string) {
- if r, ok := named.(namedRepository); ok {
- return r.Domain(), r.Path()
- }
- return splitDomain(named.Name())
-}
-
-// Parse parses s and returns a syntactically valid Reference.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: Parse will not handle short digests.
-func Parse(s string) (Reference, error) {
- matches := ReferenceRegexp.FindStringSubmatch(s)
- if matches == nil {
- if s == "" {
- return nil, ErrNameEmpty
- }
- if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
- return nil, ErrNameContainsUppercase
- }
- return nil, ErrReferenceInvalidFormat
- }
-
- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- var repo repository
-
- nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
- if nameMatch != nil && len(nameMatch) == 3 {
- repo.domain = nameMatch[1]
- repo.path = nameMatch[2]
- } else {
- repo.domain = ""
- repo.path = matches[1]
- }
-
- ref := reference{
- namedRepository: repo,
- tag: matches[2],
- }
- if matches[3] != "" {
- var err error
- ref.digest, err = digest.Parse(matches[3])
- if err != nil {
- return nil, err
- }
- }
-
- r := getBestReferenceType(ref)
- if r == nil {
- return nil, ErrNameEmpty
- }
-
- return r, nil
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name and be in the canonical
-// form, otherwise an error is returned.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: ParseNamed will not handle short digests.
-func ParseNamed(s string) (Named, error) {
- named, err := ParseNormalizedNamed(s)
- if err != nil {
- return nil, err
- }
- if named.String() != s {
- return nil, ErrNameNotCanonical
- }
- return named, nil
-}
-
-// WithName returns a named object representing the given string. If the input
-// is invalid ErrReferenceInvalidFormat will be returned.
-func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if match == nil || len(match) != 3 {
- return nil, ErrReferenceInvalidFormat
- }
- return repository{
- domain: match[1],
- path: match[2],
- }, nil
-}
-
-// WithTag combines the name from "name" and the tag from "tag" to form a
-// reference incorporating both the name and the tag.
-func WithTag(name Named, tag string) (NamedTagged, error) {
- if !anchoredTagRegexp.MatchString(tag) {
- return nil, ErrTagInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if canonical, ok := name.(Canonical); ok {
- return reference{
- namedRepository: repo,
- tag: tag,
- digest: canonical.Digest(),
- }, nil
- }
- return taggedReference{
- namedRepository: repo,
- tag: tag,
- }, nil
-}
-
-// WithDigest combines the name from "name" and the digest from "digest" to form
-// a reference incorporating both the name and the digest.
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
- if !anchoredDigestRegexp.MatchString(digest.String()) {
- return nil, ErrDigestInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if tagged, ok := name.(Tagged); ok {
- return reference{
- namedRepository: repo,
- tag: tagged.Tag(),
- digest: digest,
- }, nil
- }
- return canonicalReference{
- namedRepository: repo,
- digest: digest,
- }, nil
-}
-
-// TrimNamed removes any tag or digest from the named reference.
-func TrimNamed(ref Named) Named {
- domain, path := SplitHostname(ref)
- return repository{
- domain: domain,
- path: path,
- }
-}
-
-func getBestReferenceType(ref reference) Reference {
- if ref.Name() == "" {
- // Allow digest only references
- if ref.digest != "" {
- return digestReference(ref.digest)
- }
- return nil
- }
- if ref.tag == "" {
- if ref.digest != "" {
- return canonicalReference{
- namedRepository: ref.namedRepository,
- digest: ref.digest,
- }
- }
- return ref.namedRepository
- }
- if ref.digest == "" {
- return taggedReference{
- namedRepository: ref.namedRepository,
- tag: ref.tag,
- }
- }
-
- return ref
-}
-
-type reference struct {
- namedRepository
- tag string
- digest digest.Digest
-}
-
-func (r reference) String() string {
- return r.Name() + ":" + r.tag + "@" + r.digest.String()
-}
-
-func (r reference) Tag() string {
- return r.tag
-}
-
-func (r reference) Digest() digest.Digest {
- return r.digest
-}
-
-type repository struct {
- domain string
- path string
-}
-
-func (r repository) String() string {
- return r.Name()
-}
-
-func (r repository) Name() string {
- if r.domain == "" {
- return r.path
- }
- return r.domain + "/" + r.path
-}
-
-func (r repository) Domain() string {
- return r.domain
-}
-
-func (r repository) Path() string {
- return r.path
-}
-
-type digestReference digest.Digest
-
-func (d digestReference) String() string {
- return digest.Digest(d).String()
-}
-
-func (d digestReference) Digest() digest.Digest {
- return digest.Digest(d)
-}
-
-type taggedReference struct {
- namedRepository
- tag string
-}
-
-func (t taggedReference) String() string {
- return t.Name() + ":" + t.tag
-}
-
-func (t taggedReference) Tag() string {
- return t.tag
-}
-
-type canonicalReference struct {
- namedRepository
- digest digest.Digest
-}
-
-func (c canonicalReference) String() string {
- return c.Name() + "@" + c.digest.String()
-}
-
-func (c canonicalReference) Digest() digest.Digest {
- return c.digest
-}
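-
-// Illustrative only (not part of the original file): Parse returns the most
-// specific reference type the input supports.
-//
-//	ref, _ := Parse("docker.io/library/redis:7")
-//	if t, ok := ref.(NamedTagged); ok {
-//		_ = t.Name() // "docker.io/library/redis"
-//		_ = t.Tag()  // "7"
-//	}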
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
deleted file mode 100644
index 786034932..000000000
--- a/vendor/github.com/docker/distribution/reference/regexp.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package reference
-
-import "regexp"
-
-var (
- // alphaNumericRegexp defines the alpha numeric atom, typically a
- // component of names. This only allows lower case characters and digits.
- alphaNumericRegexp = match(`[a-z0-9]+`)
-
- // separatorRegexp defines the separators allowed to be embedded in name
- // components. This allow one period, one or two underscore and multiple
- // dashes.
- separatorRegexp = match(`(?:[._]|__|[-]*)`)
-
- // nameComponentRegexp restricts registry path component names to start
- // with at least one letter or number, with following parts able to be
- // separated by one period, one or two underscore and multiple dashes.
- nameComponentRegexp = expression(
- alphaNumericRegexp,
- optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
- // domainComponentRegexp restricts the registry domain component of a
- // repository name to start with a component as defined by DomainRegexp
- // and followed by an optional port.
- domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
-
- // DomainRegexp defines the structure of potential domain components
- // that may be part of image names. This is purposely a subset of what is
- // allowed by DNS to ensure backwards compatibility with Docker image
- // names.
- DomainRegexp = expression(
- domainComponentRegexp,
- optional(repeated(literal(`.`), domainComponentRegexp)),
- optional(literal(`:`), match(`[0-9]+`)))
-
- // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
- TagRegexp = match(`[\w][\w.-]{0,127}`)
-
- // anchoredTagRegexp matches valid tag names, anchored at the start and
- // end of the matched string.
- anchoredTagRegexp = anchored(TagRegexp)
-
- // DigestRegexp matches valid digests.
- DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
-
- // anchoredDigestRegexp matches valid digests, anchored at the start and
- // end of the matched string.
- anchoredDigestRegexp = anchored(DigestRegexp)
-
- // NameRegexp is the format for the name component of references. The
- // regexp has capturing groups for the domain and name part omitting
- // the separating forward slash from either.
- NameRegexp = expression(
- optional(DomainRegexp, literal(`/`)),
- nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp)))
-
- // anchoredNameRegexp is used to parse a name value, capturing the
- // domain and trailing components.
- anchoredNameRegexp = anchored(
- optional(capture(DomainRegexp), literal(`/`)),
- capture(nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp))))
-
- // ReferenceRegexp is the full supported format of a reference. The regexp
- // is anchored and has capturing groups for name, tag, and digest
- // components.
- ReferenceRegexp = anchored(capture(NameRegexp),
- optional(literal(":"), capture(TagRegexp)),
- optional(literal("@"), capture(DigestRegexp)))
-
- // IdentifierRegexp is the format for string identifier used as a
- // content addressable identifier using sha256. These identifiers
- // are like digests without the algorithm, since sha256 is used.
- IdentifierRegexp = match(`([a-f0-9]{64})`)
-
- // ShortIdentifierRegexp is the format used to represent a prefix
- // of an identifier. A prefix may be used to match a sha256 identifier
- // within a list of trusted identifiers.
- ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
-
- // anchoredIdentifierRegexp is used to check or match an
- // identifier value, anchored at start and end of string.
- anchoredIdentifierRegexp = anchored(IdentifierRegexp)
-
- // anchoredShortIdentifierRegexp is used to check if a value
- // is a possible identifier prefix, anchored at start and end
- // of string.
- anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
-)
-
-// match compiles the string to a regular expression.
-var match = regexp.MustCompile
-
-// literal compiles s into a literal regular expression, escaping any regexp
-// reserved characters.
-func literal(s string) *regexp.Regexp {
- re := match(regexp.QuoteMeta(s))
-
- if _, complete := re.LiteralPrefix(); !complete {
- panic("must be a literal")
- }
-
- return re
-}
-
-// expression defines a full expression, where each regular expression must
-// follow the previous.
-func expression(res ...*regexp.Regexp) *regexp.Regexp {
- var s string
- for _, re := range res {
- s += re.String()
- }
-
- return match(s)
-}
-
-// optional wraps the expression in a non-capturing group and makes the
-// production optional.
-func optional(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `?`)
-}
-
-// repeated wraps the regexp in a non-capturing group to get one or more
-// matches.
-func repeated(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `+`)
-}
-
-// group wraps the regexp in a non-capturing group.
-func group(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(?:` + expression(res...).String() + `)`)
-}
-
-// capture wraps the expression in a capturing group.
-func capture(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(` + expression(res...).String() + `)`)
-}
-
-// anchored anchors the regular expression by adding start and end delimiters.
-func anchored(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`^` + expression(res...).String() + `$`)
-}
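
The grammar above composes `ReferenceRegexp` so that submatches 1 through 3 capture name, tag, and digest. The replacement module still exports it under the same name; a sketch:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	ref := "registry.example.com:5000/app/web:v1.2@sha256:" + strings.Repeat("a", 64)
	m := reference.ReferenceRegexp.FindStringSubmatch(ref)
	if m == nil {
		panic("no match")
	}
	// Groups per the comment above: 1 = name, 2 = tag, 3 = digest.
	fmt.Println(m[1]) // registry.example.com:5000/app/web
	fmt.Println(m[2]) // v1.2
	fmt.Println(m[3]) // sha256:aaa...
}
```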
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
deleted file mode 100644
index 6c3210989..000000000
--- a/vendor/github.com/docker/distribution/registry.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package distribution
-
-import (
- "context"
-
- "github.com/docker/distribution/reference"
-)
-
-// Scope defines the set of items that match a namespace.
-type Scope interface {
- // Contains returns true if the name belongs to the namespace.
- Contains(name string) bool
-}
-
-type fullScope struct{}
-
-func (f fullScope) Contains(string) bool {
- return true
-}
-
-// GlobalScope represents the full namespace scope which contains
-// all other scopes.
-var GlobalScope = Scope(fullScope{})
-
-// Namespace represents a collection of repositories, addressable by name.
-// Generally, a namespace is backed by a set of one or more services,
-// providing facilities such as registry access, trust, and indexing.
-type Namespace interface {
- // Scope describes the names that can be used with this Namespace. The
- // global namespace will have a scope that matches all names. The scope
- // effectively provides an identity for the namespace.
- Scope() Scope
-
- // Repository should return a reference to the named repository. The
- // registry may or may not have the repository but should always return a
- // reference.
- Repository(ctx context.Context, name reference.Named) (Repository, error)
-
- // Repositories fills 'repos' with a lexicographically sorted catalog of repositories
- // up to the size of 'repos' and returns the value 'n' for the number of entries
- // which were filled. 'last' contains an offset in the catalog, and 'err' will be
- // set to io.EOF if there are no more entries to obtain.
- Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-
- // Blobs returns a blob enumerator to access all blobs
- Blobs() BlobEnumerator
-
- // BlobStatter returns a BlobStatter to control
- BlobStatter() BlobStatter
-}
-
-// RepositoryEnumerator describes an operation to enumerate repositories
-type RepositoryEnumerator interface {
- Enumerate(ctx context.Context, ingester func(string) error) error
-}
-
-// RepositoryRemover removes given repository
-type RepositoryRemover interface {
- Remove(ctx context.Context, name reference.Named) error
-}
-
-// ManifestServiceOption is a function argument for Manifest Service methods
-type ManifestServiceOption interface {
- Apply(ManifestService) error
-}
-
-// WithTag allows a tag to be passed into Put
-func WithTag(tag string) ManifestServiceOption {
- return WithTagOption{tag}
-}
-
-// WithTagOption holds a tag
-type WithTagOption struct{ Tag string }
-
-// Apply conforms to the ManifestServiceOption interface
-func (o WithTagOption) Apply(m ManifestService) error {
- // no implementation
- return nil
-}
-
-// WithManifestMediaTypes lists the media types the client wishes
-// the server to provide.
-func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
- return WithManifestMediaTypesOption{mediaTypes}
-}
-
-// WithManifestMediaTypesOption holds a list of accepted media types
-type WithManifestMediaTypesOption struct{ MediaTypes []string }
-
-// Apply conforms to the ManifestServiceOption interface
-func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
- // no implementation
- return nil
-}
-
-// Repository is a named collection of manifests and layers.
-type Repository interface {
- // Named returns the name of the repository.
- Named() reference.Named
-
- // Manifests returns a reference to this repository's manifest service.
- // with the supplied options applied.
- Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
-
- // Blobs returns a reference to this repository's blob service.
- Blobs(ctx context.Context) BlobStore
-
- // TODO(stevvooe): The above BlobStore return can probably be relaxed to
- // be a BlobService for use with clients. This will allow such
- // implementations to avoid implementing ServeBlob.
-
- // Tags returns a reference to this repositories tag service
- Tags(ctx context.Context) TagService
-}
-
-// TODO(stevvooe): Must add close methods to all these. May want to change the
-// way instances are created to better reflect internal dependency
-// relationships.
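
The `Repositories` contract above (fill the slice, advance via `last`, `io.EOF` when exhausted) implies a paging loop like this sketch, written against the upstream module, which still defines `Namespace`:

```go
package example

import (
	"context"
	"io"

	"github.com/docker/distribution"
)

// listAll drains a Namespace's catalog page by page; the page size of
// 50 is arbitrary.
func listAll(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var all []string
	page := make([]string, 50)
	last := ""
	for {
		n, err := ns.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		if err == io.EOF {
			return all, nil // catalog exhausted
		}
		if err != nil {
			return all, err
		}
		if n == 0 {
			return all, nil
		}
		last = page[n-1] // offset for the next page
	}
}
```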
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
index 6d9bb4b62..4c35b879a 100644
--- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
for _, daErr := range errs {
var err Error
- switch daErr.(type) {
+ switch daErr := daErr.(type) {
case ErrorCode:
- err = daErr.(ErrorCode).WithDetail(nil)
+ err = daErr.WithDetail(nil)
case Error:
- err = daErr.(Error)
+ err = daErr
default:
err = ErrorCodeUnknown.WithDetail(daErr)
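
The change above is the bound type switch idiom: `switch daErr := daErr.(type)` puts the already-asserted value in scope of each case, so the per-case `daErr.(T)` assertions go away. In isolation:

```go
package main

import "fmt"

// describe shows the idiom on unrelated types; the hunk above applies
// the same transformation to ErrorCode and Error.
func describe(v interface{}) string {
	switch v := v.(type) {
	case error:
		return "error: " + v.Error() // v is already an error here
	case fmt.Stringer:
		return "stringer: " + v.String()
	default:
		return fmt.Sprintf("other: %v", v)
	}
}

func main() {
	fmt.Println(describe(fmt.Errorf("boom"))) // error: boom
}
```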
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
index a9616c58a..7fceefbc6 100644
--- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
+++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -4,7 +4,7 @@ import (
"net/http"
"regexp"
- "github.com/docker/distribution/reference"
+ "github.com/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/opencontainers/go-digest"
)
@@ -134,6 +134,19 @@ var (
},
}
+ invalidPaginationResponseDescriptor = ResponseDescriptor{
+ Name: "Invalid pagination number",
+ Description: "The received parameter n was invalid in some way, as described by the error code. The client should resolve the issue and retry the request.",
+ StatusCode: http.StatusBadRequest,
+ Body: BodyDescriptor{
+ ContentType: "application/json",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodePaginationNumberInvalid,
+ },
+ }
+
repositoryNotFoundResponseDescriptor = ResponseDescriptor{
Name: "No Such Repository Error",
StatusCode: http.StatusNotFound,
@@ -490,6 +503,7 @@ var routeDescriptors = []RouteDescriptor{
},
},
Failures: []ResponseDescriptor{
+ invalidPaginationResponseDescriptor,
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
@@ -1578,6 +1592,9 @@ var routeDescriptors = []RouteDescriptor{
},
},
},
+ Failures: []ResponseDescriptor{
+ invalidPaginationResponseDescriptor,
+ },
},
},
},
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
index 97d6923aa..87e9f3c14 100644
--- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -133,4 +133,13 @@ var (
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
})
+
+ ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "PAGINATION_NUMBER_INVALID",
+ Message: "invalid number of results requested",
+ Description: `Returned when the "n" parameter (number of results
+ to return) is not an integer, "n" is negative or "n" is bigger than
+ the maximum allowed.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
)
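
A sketch of how a handler might surface the new code when validating the catalog's `n` query parameter; `parseN` and `maxEntries` are illustrative, not code from this repository:

```go
package v2

import (
	"net/url"
	"strconv"
)

// parseN rejects n exactly as the description above spells out: not an
// integer, negative, or bigger than the maximum allowed.
func parseN(q url.Values, maxEntries int) (int, error) {
	raw := q.Get("n")
	if raw == "" {
		return maxEntries, nil
	}
	n, err := strconv.Atoi(raw)
	if err != nil || n < 0 || n > maxEntries {
		return 0, ErrorCodePaginationNumberInvalid.WithDetail(map[string]string{"n": raw})
	}
	return n, nil
}
```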
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
index 1337bdb12..ab6406335 100644
--- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -6,7 +6,7 @@ import (
"net/url"
"strings"
- "github.com/docker/distribution/reference"
+ "github.com/distribution/reference"
"github.com/gorilla/mux"
)
@@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
u.RawQuery = merged.Encode()
return u
}
-
-// appendValues appends the parameters to the url. Panics if the string is not
-// a url.
-func appendValues(u string, values ...url.Values) string {
- up, err := url.Parse(u)
-
- if err != nil {
- panic(err) // should never happen
- }
-
- return appendValuesURL(up, values...).String()
-}
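
`appendValues` was an unused wrapper around `appendValuesURL` that panicked on a bad URL; callers that need the merge can do it directly with the stdlib (a sketch, not restored code):

```go
package example

import "net/url"

// withValues merges query values onto an already-parsed URL, matching
// what appendValuesURL does without the parse-and-panic wrapper.
func withValues(u *url.URL, values ...url.Values) *url.URL {
	merged := u.Query()
	for _, v := range values {
		for k, vv := range v {
			merged[k] = append(merged[k], vv...)
		}
	}
	u.RawQuery = merged.Encode()
	return u
}
```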
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
index 6e3f1ccc4..fe238210c 100644
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -117,8 +117,8 @@ func init() {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
- isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
- if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
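
`strings.ContainsRune(s, r)` is defined as `strings.IndexRune(s, r) >= 0`, so the hunk is behavior-preserving and only states the intent directly:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const separators = " \t\"(),/:;<=>?@[]\\{}"
	for _, r := range []rune{',', 'a'} {
		before := strings.IndexRune(separators, r) >= 0
		after := strings.ContainsRune(separators, r)
		fmt.Printf("%q: equivalent=%v\n", r, before == after) // always true
	}
}
```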
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
deleted file mode 100644
index 695bf852f..000000000
--- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package client
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/docker/distribution"
-)
-
-type httpBlobUpload struct {
- statter distribution.BlobStatter
- client *http.Client
-
- uuid string
- startedAt time.Time
-
- location string // always the last value of the location header.
- offset int64
- closed bool
-}
-
-func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
- panic("Not implemented")
-}
-
-func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
- if resp.StatusCode == http.StatusNotFound {
- return distribution.ErrBlobUploadUnknown
- }
- return HandleErrorResponse(resp)
-}
-
-func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
- req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
- if err != nil {
- return 0, err
- }
- defer req.Body.Close()
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return 0, err
- }
-
- if !SuccessStatus(resp.StatusCode) {
- return 0, hbu.handleErrorResponse(resp)
- }
-
- hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
- hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
- if err != nil {
- return 0, err
- }
- rng := resp.Header.Get("Range")
- var start, end int64
- if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
- return 0, err
- } else if n != 2 || end < start {
- return 0, fmt.Errorf("bad range format: %s", rng)
- }
-
- return (end - start + 1), nil
-
-}
-
-func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
- req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
- if err != nil {
- return 0, err
- }
- req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
- req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
- req.Header.Set("Content-Type", "application/octet-stream")
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return 0, err
- }
-
- if !SuccessStatus(resp.StatusCode) {
- return 0, hbu.handleErrorResponse(resp)
- }
-
- hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
- hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
- if err != nil {
- return 0, err
- }
- rng := resp.Header.Get("Range")
- var start, end int
- if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
- return 0, err
- } else if n != 2 || end < start {
- return 0, fmt.Errorf("bad range format: %s", rng)
- }
-
- return (end - start + 1), nil
-
-}
-
-func (hbu *httpBlobUpload) Size() int64 {
- return hbu.offset
-}
-
-func (hbu *httpBlobUpload) ID() string {
- return hbu.uuid
-}
-
-func (hbu *httpBlobUpload) StartedAt() time.Time {
- return hbu.startedAt
-}
-
-func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
- // TODO(dmcgowan): Check if already finished, if so just fetch
- req, err := http.NewRequest("PUT", hbu.location, nil)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- values := req.URL.Query()
- values.Set("digest", desc.Digest.String())
- req.URL.RawQuery = values.Encode()
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if !SuccessStatus(resp.StatusCode) {
- return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
- }
-
- return hbu.statter.Stat(ctx, desc.Digest)
-}
-
-func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
- req, err := http.NewRequest("DELETE", hbu.location, nil)
- if err != nil {
- return err
- }
- resp, err := hbu.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
- return nil
- }
- return hbu.handleErrorResponse(resp)
-}
-
-func (hbu *httpBlobUpload) Close() error {
- hbu.closed = true
- return nil
-}
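
Both `ReadFrom` and `Write` above decode the registry's `Range` response header the same way; the shared logic, extracted as a sketch:

```go
package example

import "fmt"

// acceptedBytes mirrors the deleted parsing: the registry answers with
// "start-end" and the writer reports end-start+1 bytes as accepted.
func acceptedBytes(rangeHeader string) (int64, error) {
	var start, end int64
	n, err := fmt.Sscanf(rangeHeader, "%d-%d", &start, &end)
	if err != nil {
		return 0, err
	}
	if n != 2 || end < start {
		return 0, fmt.Errorf("bad range format: %s", rangeHeader)
	}
	return end - start + 1, nil
}
```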
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
deleted file mode 100644
index 52d49d5d2..000000000
--- a/vendor/github.com/docker/distribution/registry/client/errors.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/client/auth/challenge"
-)
-
-// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
-// errcode.Errors slice.
-var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
-
-// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
-// returned when making a registry api call.
-type UnexpectedHTTPStatusError struct {
- Status string
-}
-
-func (e *UnexpectedHTTPStatusError) Error() string {
- return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
-}
-
-// UnexpectedHTTPResponseError is returned when an expected HTTP status code
-// is returned, but the content was unexpected and failed to be parsed.
-type UnexpectedHTTPResponseError struct {
- ParseErr error
- StatusCode int
- Response []byte
-}
-
-func (e *UnexpectedHTTPResponseError) Error() string {
- return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
-}
-
-func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
- var errors errcode.Errors
- body, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- // For backward compatibility, handle irregularly formatted
- // messages that contain a "details" field.
- var detailsErr struct {
- Details string `json:"details"`
- }
- err = json.Unmarshal(body, &detailsErr)
- if err == nil && detailsErr.Details != "" {
- switch statusCode {
- case http.StatusUnauthorized:
- return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
- case http.StatusTooManyRequests:
- return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
- default:
- return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
- }
- }
-
- if err := json.Unmarshal(body, &errors); err != nil {
- return &UnexpectedHTTPResponseError{
- ParseErr: err,
- StatusCode: statusCode,
- Response: body,
- }
- }
-
- if len(errors) == 0 {
- // If there was no error specified in the body, return
- // UnexpectedHTTPResponseError.
- return &UnexpectedHTTPResponseError{
- ParseErr: ErrNoErrorsInBody,
- StatusCode: statusCode,
- Response: body,
- }
- }
-
- return errors
-}
-
-func makeErrorList(err error) []error {
- if errL, ok := err.(errcode.Errors); ok {
- return []error(errL)
- }
- return []error{err}
-}
-
-func mergeErrors(err1, err2 error) error {
- return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
-}
-
-// HandleErrorResponse returns error parsed from HTTP response for an
-// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
-// UnexpectedHTTPStatusError returned for response code outside of expected
-// range.
-func HandleErrorResponse(resp *http.Response) error {
- if resp.StatusCode >= 400 && resp.StatusCode < 500 {
- // Check for OAuth errors within the `WWW-Authenticate` header first
- // See https://tools.ietf.org/html/rfc6750#section-3
- for _, c := range challenge.ResponseChallenges(resp) {
- if c.Scheme == "bearer" {
- var err errcode.Error
- // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
- switch c.Parameters["error"] {
- case "invalid_token":
- err.Code = errcode.ErrorCodeUnauthorized
- case "insufficient_scope":
- err.Code = errcode.ErrorCodeDenied
- default:
- continue
- }
- if description := c.Parameters["error_description"]; description != "" {
- err.Message = description
- } else {
- err.Message = err.Code.Message()
- }
-
- return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
- }
- }
- err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
- if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
- return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
- }
- return err
- }
- return &UnexpectedHTTPStatusError{Status: resp.Status}
-}
-
-// SuccessStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func SuccessStatus(status int) bool {
- return status >= 200 && status <= 399
-}
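
These helpers leave the vendored tree but stay importable, since go.mod still requires `github.com/docker/distribution`; a usage sketch:

```go
package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// checkResponse is illustrative: 200-399 is success, 400-499 is parsed
// into errcode errors, and anything else becomes an
// UnexpectedHTTPStatusError.
func checkResponse(resp *http.Response) error {
	if client.SuccessStatus(resp.StatusCode) {
		return nil
	}
	return client.HandleErrorResponse(resp)
}
```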
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
deleted file mode 100644
index aa442e654..000000000
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ /dev/null
@@ -1,867 +0,0 @@
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/api/v2"
- "github.com/docker/distribution/registry/client/transport"
- "github.com/docker/distribution/registry/storage/cache"
- "github.com/docker/distribution/registry/storage/cache/memory"
- "github.com/opencontainers/go-digest"
-)
-
-// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
-type Registry interface {
- Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-}
-
-// checkHTTPRedirect is a callback that can manipulate redirected HTTP
-// requests. It is used to preserve Accept and Range headers.
-func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
-
- if len(via) > 0 {
- for headerName, headerVals := range via[0].Header {
- if headerName != "Accept" && headerName != "Range" {
- continue
- }
- for _, val := range headerVals {
- // Don't add to redirected request if redirected
- // request already has a header with the same
- // name and value.
- hasValue := false
- for _, existingVal := range req.Header[headerName] {
- if existingVal == val {
- hasValue = true
- break
- }
- }
- if !hasValue {
- req.Header.Add(headerName, val)
- }
- }
- }
- }
-
- return nil
-}
-
-// NewRegistry creates a registry namespace which can be used to get a listing of repositories
-func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
- ub, err := v2.NewURLBuilderFromString(baseURL, false)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{
- Transport: transport,
- Timeout: 1 * time.Minute,
- CheckRedirect: checkHTTPRedirect,
- }
-
- return &registry{
- client: client,
- ub: ub,
- }, nil
-}
-
-type registry struct {
- client *http.Client
- ub *v2.URLBuilder
-}
-
-// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
-// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
-// are no more entries
-func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
- var numFilled int
- var returnErr error
-
- values := buildCatalogValues(len(entries), last)
- u, err := r.ub.BuildCatalogURL(values)
- if err != nil {
- return 0, err
- }
-
- resp, err := r.client.Get(u)
- if err != nil {
- return 0, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- var ctlg struct {
- Repositories []string `json:"repositories"`
- }
- decoder := json.NewDecoder(resp.Body)
-
- if err := decoder.Decode(&ctlg); err != nil {
- return 0, err
- }
-
- for cnt := range ctlg.Repositories {
- entries[cnt] = ctlg.Repositories[cnt]
- }
- numFilled = len(ctlg.Repositories)
-
- link := resp.Header.Get("Link")
- if link == "" {
- returnErr = io.EOF
- }
- } else {
- return 0, HandleErrorResponse(resp)
- }
-
- return numFilled, returnErr
-}
-
-// NewRepository creates a new Repository for the given repository name and base URL.
-func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
- ub, err := v2.NewURLBuilderFromString(baseURL, false)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{
- Transport: transport,
- CheckRedirect: checkHTTPRedirect,
- // TODO(dmcgowan): create cookie jar
- }
-
- return &repository{
- client: client,
- ub: ub,
- name: name,
- }, nil
-}
-
-type repository struct {
- client *http.Client
- ub *v2.URLBuilder
- name reference.Named
-}
-
-func (r *repository) Named() reference.Named {
- return r.name
-}
-
-func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
- statter := &blobStatter{
- name: r.name,
- ub: r.ub,
- client: r.client,
- }
- return &blobs{
- name: r.name,
- ub: r.ub,
- client: r.client,
- statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
- }
-}
-
-func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
- // todo(richardscothern): options should be sent over the wire
- return &manifests{
- name: r.name,
- ub: r.ub,
- client: r.client,
- etags: make(map[string]string),
- }, nil
-}
-
-func (r *repository) Tags(ctx context.Context) distribution.TagService {
- return &tags{
- client: r.client,
- ub: r.ub,
- name: r.Named(),
- }
-}
-
-// tags implements remote tagging operations.
-type tags struct {
- client *http.Client
- ub *v2.URLBuilder
- name reference.Named
-}
-
-// All returns all tags
-func (t *tags) All(ctx context.Context) ([]string, error) {
- var tags []string
-
- listURLStr, err := t.ub.BuildTagsURL(t.name)
- if err != nil {
- return tags, err
- }
-
- listURL, err := url.Parse(listURLStr)
- if err != nil {
- return tags, err
- }
-
- for {
- resp, err := t.client.Get(listURL.String())
- if err != nil {
- return tags, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return tags, err
- }
-
- tagsResponse := struct {
- Tags []string `json:"tags"`
- }{}
- if err := json.Unmarshal(b, &tagsResponse); err != nil {
- return tags, err
- }
- tags = append(tags, tagsResponse.Tags...)
- if link := resp.Header.Get("Link"); link != "" {
- linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
- linkURL, err := url.Parse(linkURLStr)
- if err != nil {
- return tags, err
- }
-
- listURL = listURL.ResolveReference(linkURL)
- } else {
- return tags, nil
- }
- } else {
- return tags, HandleErrorResponse(resp)
- }
- }
-}
-
-func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
- desc := distribution.Descriptor{}
- headers := response.Header
-
- ctHeader := headers.Get("Content-Type")
- if ctHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
- }
- desc.MediaType = ctHeader
-
- digestHeader := headers.Get("Docker-Content-Digest")
- if digestHeader == "" {
- bytes, err := ioutil.ReadAll(response.Body)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- return desc, nil
- }
-
- dgst, err := digest.Parse(digestHeader)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Digest = dgst
-
- lengthHeader := headers.Get("Content-Length")
- if lengthHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
- }
- length, err := strconv.ParseInt(lengthHeader, 10, 64)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Size = length
-
- return desc, nil
-
-}
-
-// Get issues a HEAD request for a Manifest against its named endpoint in order
-// to construct a descriptor for the tag. If the registry doesn't support HEADing
-// a manifest, fallback to GET.
-func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
- ref, err := reference.WithTag(t.name, tag)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- u, err := t.ub.BuildManifestURL(ref)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- newRequest := func(method string) (*http.Response, error) {
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- for _, t := range distribution.ManifestMediaTypes() {
- req.Header.Add("Accept", t)
- }
- resp, err := t.client.Do(req)
- return resp, err
- }
-
- resp, err := newRequest("HEAD")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- switch {
- case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
- // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
- return descriptorFromResponse(resp)
- default:
- // if the response is an error - there will be no body to decode.
- // Issue a GET request:
- // - for data from a server that does not handle HEAD
- // - to get error details in case of a failure
- resp, err = newRequest("GET")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode >= 200 && resp.StatusCode < 400 {
- return descriptorFromResponse(resp)
- }
- return distribution.Descriptor{}, HandleErrorResponse(resp)
- }
-}
-
-func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
- panic("not implemented")
-}
-
-func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
- panic("not implemented")
-}
-
-func (t *tags) Untag(ctx context.Context, tag string) error {
- panic("not implemented")
-}
-
-type manifests struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
- etags map[string]string
-}
-
-func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
- ref, err := reference.WithDigest(ms.name, dgst)
- if err != nil {
- return false, err
- }
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return false, err
- }
-
- resp, err := ms.client.Head(u)
- if err != nil {
- return false, err
- }
-
- if SuccessStatus(resp.StatusCode) {
- return true, nil
- } else if resp.StatusCode == http.StatusNotFound {
- return false, nil
- }
- return false, HandleErrorResponse(resp)
-}
-
-// AddEtagToTag allows a client to supply an eTag to Get which will be
-// used for a conditional HTTP request. If the eTag matches, a nil manifest
-// and ErrManifestNotModified error will be returned. etag is automatically
-// quoted when added to this map.
-func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
- return etagOption{tag, etag}
-}
-
-type etagOption struct{ tag, etag string }
-
-func (o etagOption) Apply(ms distribution.ManifestService) error {
- if ms, ok := ms.(*manifests); ok {
- ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
- return nil
- }
- return fmt.Errorf("etag options is a client-only option")
-}
-
-// ReturnContentDigest allows a client to set a the content digest on
-// a successful request from the 'Docker-Content-Digest' header. This
-// returned digest is represents the digest which the registry uses
-// to refer to the content and can be used to delete the content.
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
- return contentDigestOption{dgst}
-}
-
-type contentDigestOption struct{ digest *digest.Digest }
-
-func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
- return nil
-}
-
-func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
- var (
- digestOrTag string
- ref reference.Named
- err error
- contentDgst *digest.Digest
- mediaTypes []string
- )
-
- for _, option := range options {
- switch opt := option.(type) {
- case distribution.WithTagOption:
- digestOrTag = opt.Tag
- ref, err = reference.WithTag(ms.name, opt.Tag)
- if err != nil {
- return nil, err
- }
- case contentDigestOption:
- contentDgst = opt.digest
- case distribution.WithManifestMediaTypesOption:
- mediaTypes = opt.MediaTypes
- default:
- err := option.Apply(ms)
- if err != nil {
- return nil, err
- }
- }
- }
-
- if digestOrTag == "" {
- digestOrTag = dgst.String()
- ref, err = reference.WithDigest(ms.name, dgst)
- if err != nil {
- return nil, err
- }
- }
-
- if len(mediaTypes) == 0 {
- mediaTypes = distribution.ManifestMediaTypes()
- }
-
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest("GET", u, nil)
- if err != nil {
- return nil, err
- }
-
- for _, t := range mediaTypes {
- req.Header.Add("Accept", t)
- }
-
- if _, ok := ms.etags[digestOrTag]; ok {
- req.Header.Set("If-None-Match", ms.etags[digestOrTag])
- }
-
- resp, err := ms.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotModified {
- return nil, distribution.ErrManifestNotModified
- } else if SuccessStatus(resp.StatusCode) {
- if contentDgst != nil {
- dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
- if err == nil {
- *contentDgst = dgst
- }
- }
- mt := resp.Header.Get("Content-Type")
- body, err := ioutil.ReadAll(resp.Body)
-
- if err != nil {
- return nil, err
- }
- m, _, err := distribution.UnmarshalManifest(mt, body)
- if err != nil {
- return nil, err
- }
- return m, nil
- }
- return nil, HandleErrorResponse(resp)
-}
-
-// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
-// tag name in order to build the correct upload URL.
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
- ref := ms.name
- var tagged bool
-
- for _, option := range options {
- if opt, ok := option.(distribution.WithTagOption); ok {
- var err error
- ref, err = reference.WithTag(ref, opt.Tag)
- if err != nil {
- return "", err
- }
- tagged = true
- } else {
- err := option.Apply(ms)
- if err != nil {
- return "", err
- }
- }
- }
- mediaType, p, err := m.Payload()
- if err != nil {
- return "", err
- }
-
- if !tagged {
- // generate a canonical digest and Put by digest
- _, d, err := distribution.UnmarshalManifest(mediaType, p)
- if err != nil {
- return "", err
- }
- ref, err = reference.WithDigest(ref, d.Digest)
- if err != nil {
- return "", err
- }
- }
-
- manifestURL, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return "", err
- }
-
- putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
- if err != nil {
- return "", err
- }
-
- putRequest.Header.Set("Content-Type", mediaType)
-
- resp, err := ms.client.Do(putRequest)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- dgstHeader := resp.Header.Get("Docker-Content-Digest")
- dgst, err := digest.Parse(dgstHeader)
- if err != nil {
- return "", err
- }
-
- return dgst, nil
- }
-
- return "", HandleErrorResponse(resp)
-}
-
-func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
- ref, err := reference.WithDigest(ms.name, dgst)
- if err != nil {
- return err
- }
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return err
- }
- req, err := http.NewRequest("DELETE", u, nil)
- if err != nil {
- return err
- }
-
- resp, err := ms.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- return nil
- }
- return HandleErrorResponse(resp)
-}
-
-// todo(richardscothern): Restore interface and implementation with merge of #1050
-/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
- panic("not supported")
-}*/
-
-type blobs struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
-
- statter distribution.BlobDescriptorService
- distribution.BlobDeleter
-}
-
-func sanitizeLocation(location, base string) (string, error) {
- baseURL, err := url.Parse(base)
- if err != nil {
- return "", err
- }
-
- locationURL, err := url.Parse(location)
- if err != nil {
- return "", err
- }
-
- return baseURL.ResolveReference(locationURL).String(), nil
-}
-
-func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- return bs.statter.Stat(ctx, dgst)
-
-}
-
-func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
- reader, err := bs.Open(ctx, dgst)
- if err != nil {
- return nil, err
- }
- defer reader.Close()
-
- return ioutil.ReadAll(reader)
-}
-
-func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return nil, err
- }
- blobURL, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return nil, err
- }
-
- return transport.NewHTTPReadSeeker(bs.client, blobURL,
- func(resp *http.Response) error {
- if resp.StatusCode == http.StatusNotFound {
- return distribution.ErrBlobUnknown
- }
- return HandleErrorResponse(resp)
- }), nil
-}
-
-func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
- panic("not implemented")
-}
-
-func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
- writer, err := bs.Create(ctx)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- dgstr := digest.Canonical.Digester()
- n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
- if err != nil {
- return distribution.Descriptor{}, err
- }
- if n < int64(len(p)) {
- return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
- }
-
- desc := distribution.Descriptor{
- MediaType: mediaType,
- Size: int64(len(p)),
- Digest: dgstr.Digest(),
- }
-
- return writer.Commit(ctx, desc)
-}
-
-type optionFunc func(interface{}) error
-
-func (f optionFunc) Apply(v interface{}) error {
- return f(v)
-}
-
-// WithMountFrom returns a BlobCreateOption which designates that the blob should be
-// mounted from the given canonical reference.
-func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
- return optionFunc(func(v interface{}) error {
- opts, ok := v.(*distribution.CreateOptions)
- if !ok {
- return fmt.Errorf("unexpected options type: %T", v)
- }
-
- opts.Mount.ShouldMount = true
- opts.Mount.From = ref
-
- return nil
- })
-}
-
-func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
- var opts distribution.CreateOptions
-
- for _, option := range options {
- err := option.Apply(&opts)
- if err != nil {
- return nil, err
- }
- }
-
- var values []url.Values
-
- if opts.Mount.ShouldMount {
- values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
- }
-
- u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
- if err != nil {
- return nil, err
- }
-
- resp, err := bs.client.Post(u, "", nil)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- switch resp.StatusCode {
- case http.StatusCreated:
- desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
- if err != nil {
- return nil, err
- }
- return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
- case http.StatusAccepted:
- // TODO(dmcgowan): Check for invalid UUID
- uuid := resp.Header.Get("Docker-Upload-UUID")
- location, err := sanitizeLocation(resp.Header.Get("Location"), u)
- if err != nil {
- return nil, err
- }
-
- return &httpBlobUpload{
- statter: bs.statter,
- client: bs.client,
- uuid: uuid,
- startedAt: time.Now(),
- location: location,
- }, nil
- default:
- return nil, HandleErrorResponse(resp)
- }
-}
-
-func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
- panic("not implemented")
-}
-
-func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
- return bs.statter.Clear(ctx, dgst)
-}
-
-type blobStatter struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
-}
-
-func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- u, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- resp, err := bs.client.Head(u)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- lengthHeader := resp.Header.Get("Content-Length")
- if lengthHeader == "" {
- return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
- }
-
- length, err := strconv.ParseInt(lengthHeader, 10, 64)
- if err != nil {
- return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
- }
-
- return distribution.Descriptor{
- MediaType: resp.Header.Get("Content-Type"),
- Size: length,
- Digest: dgst,
- }, nil
- } else if resp.StatusCode == http.StatusNotFound {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
- return distribution.Descriptor{}, HandleErrorResponse(resp)
-}
-
-func buildCatalogValues(maxEntries int, last string) url.Values {
- values := url.Values{}
-
- if maxEntries > 0 {
- values.Add("n", strconv.Itoa(maxEntries))
- }
-
- if last != "" {
- values.Add("last", last)
- }
-
- return values
-}
-
-func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return err
- }
- blobURL, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return err
- }
-
- req, err := http.NewRequest("DELETE", blobURL, nil)
- if err != nil {
- return err
- }
-
- resp, err := bs.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- return nil
- }
- return HandleErrorResponse(resp)
-}
-
-func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- return nil
-}
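
A usage sketch for the deleted client, with the signatures exactly as shown above (the package still ships upstream); note that most registries reject anonymous tag listing, and the auth transport wiring is omitted here:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	name, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}
	repo, err := client.NewRepository(name, "https://registry-1.docker.io", http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	// All pages through the tags endpoint, following Link headers as above.
	tags, err := repo.Tags(context.Background()).All(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)
}
```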
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
deleted file mode 100644
index 1d0b382fb..000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package transport
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "regexp"
- "strconv"
-)
-
-var (
- contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`)
-
- // ErrWrongCodeForByteRange is returned if the client sends a request
- // with a Range header but the server returns a 2xx or 3xx code other
- // than 206 Partial Content.
- ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
-)
-
-// ReadSeekCloser combines io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
-// request. When seeking and starting a read from a non-zero offset
-// the a "Range" header will be added which sets the offset.
-// TODO(dmcgowan): Move this into a separate utility package
-func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
- return &httpReadSeeker{
- client: client,
- url: url,
- errorHandler: errorHandler,
- }
-}
-
-type httpReadSeeker struct {
- client *http.Client
- url string
-
- // errorHandler creates an error from an unsuccessful HTTP response.
- // This allows the error to be created with the HTTP response body
- // without leaking the body through a returned error.
- errorHandler func(*http.Response) error
-
- size int64
-
- // rc is the remote read closer.
- rc io.ReadCloser
- // readerOffset tracks the offset as of the last read.
- readerOffset int64
- // seekOffset allows Seek to override the offset. Seek changes
- // seekOffset instead of changing readOffset directly so that
- // connection resets can be delayed and possibly avoided if the
- // seek is undone (i.e. seeking to the end and then back to the
- // beginning).
- seekOffset int64
- err error
-}
-
-func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- // If we sought to a different position, we need to reset the
- // connection. This logic is here instead of Seek so that if
- // a seek is undone before the next read, the connection doesn't
- // need to be closed and reopened. A common example of this is
- // seeking to the end to determine the length, and then seeking
- // back to the original position.
- if hrs.readerOffset != hrs.seekOffset {
- hrs.reset()
- }
-
- hrs.readerOffset = hrs.seekOffset
-
- rd, err := hrs.reader()
- if err != nil {
- return 0, err
- }
-
- n, err = rd.Read(p)
- hrs.seekOffset += int64(n)
- hrs.readerOffset += int64(n)
-
- return n, err
-}
-
-func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- lastReaderOffset := hrs.readerOffset
-
- if whence == io.SeekStart && hrs.rc == nil {
- // If no request has been made yet, and we are seeking to an
- // absolute position, set the read offset as well to avoid an
- // unnecessary request.
- hrs.readerOffset = offset
- }
-
- _, err := hrs.reader()
- if err != nil {
- hrs.readerOffset = lastReaderOffset
- return 0, err
- }
-
- newOffset := hrs.seekOffset
-
- switch whence {
- case io.SeekCurrent:
- newOffset += offset
- case io.SeekEnd:
- if hrs.size < 0 {
- return 0, errors.New("content length not known")
- }
- newOffset = hrs.size + offset
- case io.SeekStart:
- newOffset = offset
- }
-
- if newOffset < 0 {
- err = errors.New("cannot seek to negative position")
- } else {
- hrs.seekOffset = newOffset
- }
-
- return hrs.seekOffset, err
-}
-
-func (hrs *httpReadSeeker) Close() error {
- if hrs.err != nil {
- return hrs.err
- }
-
- // close and release reader chain
- if hrs.rc != nil {
- hrs.rc.Close()
- }
-
- hrs.rc = nil
-
- hrs.err = errors.New("httpLayer: closed")
-
- return nil
-}
-
-func (hrs *httpReadSeeker) reset() {
- if hrs.err != nil {
- return
- }
- if hrs.rc != nil {
- hrs.rc.Close()
- hrs.rc = nil
- }
-}
-
-func (hrs *httpReadSeeker) reader() (io.Reader, error) {
- if hrs.err != nil {
- return nil, hrs.err
- }
-
- if hrs.rc != nil {
- return hrs.rc, nil
- }
-
- req, err := http.NewRequest("GET", hrs.url, nil)
- if err != nil {
- return nil, err
- }
-
- if hrs.readerOffset > 0 {
- // If we are at different offset, issue a range request from there.
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
- // TODO: get context in here
- // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
- }
-
- req.Header.Add("Accept-Encoding", "identity")
- resp, err := hrs.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- // Normally would use client.SuccessStatus, but that would be a cyclic
- // import
- if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
- if hrs.readerOffset > 0 {
- if resp.StatusCode != http.StatusPartialContent {
- return nil, ErrWrongCodeForByteRange
- }
-
- contentRange := resp.Header.Get("Content-Range")
- if contentRange == "" {
- return nil, errors.New("no Content-Range header found in HTTP 206 response")
- }
-
- submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
- if len(submatches) < 4 {
- return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
- }
-
- startByte, err := strconv.ParseUint(submatches[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
- }
-
- if startByte != uint64(hrs.readerOffset) {
- return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
- }
-
- endByte, err := strconv.ParseUint(submatches[2], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
- }
-
- if submatches[3] == "*" {
- hrs.size = -1
- } else {
- size, err := strconv.ParseUint(submatches[3], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
- }
-
- if endByte+1 != size {
- return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
- }
-
- hrs.size = int64(size)
- }
- } else if resp.StatusCode == http.StatusOK {
- hrs.size = resp.ContentLength
- } else {
- hrs.size = -1
- }
- hrs.rc = resp.Body
- } else {
- defer resp.Body.Close()
- if hrs.errorHandler != nil {
- return nil, hrs.errorHandler(resp)
- }
- return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
- }
-
- return hrs.rc, nil
-}
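
The comment in `Read` above describes the access pattern this seeker defers connection resets for: seek to the end to learn the size, then rewind. As a plain `io.ReadSeeker` sketch:

```go
package example

import "io"

// sizeOf seeks to the end to learn the size and rewinds; with the
// deleted httpReadSeeker this costs no reconnect, because the reset is
// delayed until the next Read at a changed offset.
func sizeOf(rs io.ReadSeeker) (int64, error) {
	size, err := rs.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return size, nil
}
```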
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
deleted file mode 100644
index 30e45fab0..000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package transport
-
-import (
- "io"
- "net/http"
- "sync"
-)
-
-// RequestModifier represents an object which will do an inplace
-// modification of an HTTP request.
-type RequestModifier interface {
- ModifyRequest(*http.Request) error
-}
-
-type headerModifier http.Header
-
-// NewHeaderRequestModifier returns a new RequestModifier which will
-// add the given headers to a request.
-func NewHeaderRequestModifier(header http.Header) RequestModifier {
- return headerModifier(header)
-}
-
-func (h headerModifier) ModifyRequest(req *http.Request) error {
- for k, s := range http.Header(h) {
- req.Header[k] = append(req.Header[k], s...)
- }
-
- return nil
-}
-
-// NewTransport creates a new transport which will apply modifiers to
-// the request on a RoundTrip call.
-func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
- return &transport{
- Modifiers: modifiers,
- Base: base,
- }
-}
-
-// transport is an http.RoundTripper that makes HTTP requests after
-// copying and modifying the request
-type transport struct {
- Modifiers []RequestModifier
- Base http.RoundTripper
-
- mu sync.Mutex // guards modReq
- modReq map[*http.Request]*http.Request // original -> modified
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or token is expired,
-// tries to refresh/fetch a new token.
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
- req2 := cloneRequest(req)
- for _, modifier := range t.Modifiers {
- if err := modifier.ModifyRequest(req2); err != nil {
- return nil, err
- }
- }
-
- t.setModReq(req, req2)
- res, err := t.base().RoundTrip(req2)
- if err != nil {
- t.setModReq(req, nil)
- return nil, err
- }
- res.Body = &onEOFReader{
- rc: res.Body,
- fn: func() { t.setModReq(req, nil) },
- }
- return res, nil
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- t.mu.Lock()
- modReq := t.modReq[req]
- delete(t.modReq, req)
- t.mu.Unlock()
- cr.CancelRequest(modReq)
- }
-}
-
-func (t *transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-func (t *transport) setModReq(orig, mod *http.Request) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.modReq == nil {
- t.modReq = make(map[*http.Request]*http.Request)
- }
- if mod == nil {
- delete(t.modReq, orig)
- } else {
- t.modReq[orig] = mod
- }
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
-
- return r2
-}
-
-type onEOFReader struct {
- rc io.ReadCloser
- fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-func (r *onEOFReader) Close() error {
- err := r.rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *onEOFReader) runFunc() {
- if fn := r.fn; fn != nil {
- fn()
- r.fn = nil
- }
-}
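
Usage sketch with the signatures above (the package remains available upstream): stamp a static header onto every outgoing request.

```go
package main

import (
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// The header value is illustrative only.
	rt := transport.NewTransport(http.DefaultTransport,
		transport.NewHeaderRequestModifier(http.Header{
			"User-Agent": {"containers-common-example"},
		}))
	httpClient := &http.Client{Transport: rt}
	_ = httpClient
}
```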
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
deleted file mode 100644
index 10a390919..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package cache provides facilities to speed up access to the storage
-// backend.
-package cache
-
-import (
- "fmt"
-
- "github.com/docker/distribution"
-)
-
-// BlobDescriptorCacheProvider provides repository scoped
-// BlobDescriptorService cache instances and a global descriptor cache.
-type BlobDescriptorCacheProvider interface {
- distribution.BlobDescriptorService
-
- RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
-}
-
-// ValidateDescriptor provides a helper function to ensure that caches have
-// common criteria for admitting descriptors.
-func ValidateDescriptor(desc distribution.Descriptor) error {
- if err := desc.Digest.Validate(); err != nil {
- return err
- }
-
- if desc.Size < 0 {
- return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
- }
-
- if desc.MediaType == "" {
- return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
- }
-
- return nil
-}
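
`ValidateDescriptor`'s three checks (valid digest, non-negative size, non-empty media type) in use, sketched against the upstream packages:

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/opencontainers/go-digest"
)

// validated builds a descriptor for a payload and applies the common
// admission checks before it would be handed to a cache.
func validated(payload []byte, mediaType string) (distribution.Descriptor, error) {
	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	}
	if err := cache.ValidateDescriptor(desc); err != nil {
		return distribution.Descriptor{}, err
	}
	return desc, nil
}
```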
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
deleted file mode 100644
index ac4c45211..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package cache
-
-import (
- "context"
-
- "github.com/docker/distribution"
- prometheus "github.com/docker/distribution/metrics"
- "github.com/opencontainers/go-digest"
-)
-
-// Metrics is used to hold metric counters
-// related to the number of times a cache was
-// hit or missed.
-type Metrics struct {
- Requests uint64
- Hits uint64
- Misses uint64
-}
-
-// Logger can be provided on the MetricsTracker to log errors.
-//
-// Usually, this is just a proxy to dcontext.GetLogger.
-type Logger interface {
- Errorf(format string, args ...interface{})
-}
-
-// MetricsTracker represents a metric tracker
-// which simply counts the number of hits and misses.
-type MetricsTracker interface {
- Hit()
- Miss()
- Metrics() Metrics
- Logger(context.Context) Logger
-}
-
-type cachedBlobStatter struct {
- cache distribution.BlobDescriptorService
- backend distribution.BlobDescriptorService
- tracker MetricsTracker
-}
-
-var (
- // cacheCount is the total number of cache requests received, hits, and misses
- cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache requests received", "type")
-)
-
-// NewCachedBlobStatter creates a new statter which prefers a cache and
-// falls back to a backend.
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- }
-}
-
-// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
-// falls back to a backend. Hits and misses will be sent to the tracker.
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- tracker: tracker,
- }
-}
-
-func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- cacheCount.WithValues("Request").Inc(1)
- desc, err := cbds.cache.Stat(ctx, dgst)
- if err != nil {
- if err != distribution.ErrBlobUnknown {
- logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
- }
-
- goto fallback
- }
- cacheCount.WithValues("Hit").Inc(1)
- if cbds.tracker != nil {
- cbds.tracker.Hit()
- }
- return desc, nil
-fallback:
- cacheCount.WithValues("Miss").Inc(1)
- if cbds.tracker != nil {
- cbds.tracker.Miss()
- }
- desc, err = cbds.backend.Stat(ctx, dgst)
- if err != nil {
- return desc, err
- }
-
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
- }
-
- return desc, err
-
-}
-
-func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
- err := cbds.cache.Clear(ctx, dgst)
- if err != nil {
- return err
- }
-
- err = cbds.backend.Clear(ctx, dgst)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
- }
- return nil
-}
-
-func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
- if tracker == nil {
- return
- }
-
- logger := tracker.Logger(ctx)
- if logger == nil {
- return
- }
- logger.Errorf(format, args...)
-}
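
The `goto fallback` in `Stat` above is just a cache-aside read. The same control flow without the label, as a sketch against the interfaces this file already imports (`statWithFallback` and the package name are hypothetical):

```go
package cacheutil

import (
	"context"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// statWithFallback tries the cache first, falls through to the backend on
// any miss, and repopulates the cache on the way out. (The deleted code
// additionally logs cache errors other than ErrBlobUnknown before falling
// through.)
func statWithFallback(ctx context.Context, cacheSvc, backendSvc distribution.BlobDescriptorService, dgst digest.Digest) (distribution.Descriptor, error) {
	if desc, err := cacheSvc.Stat(ctx, dgst); err == nil {
		return desc, nil // cache hit
	}
	desc, err := backendSvc.Stat(ctx, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Best effort: a failed cache write must not fail the read.
	_ = cacheSvc.SetDescriptor(ctx, dgst, desc)
	return desc, nil
}
```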
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
deleted file mode 100644
index 42d94d9bd..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package memory
-
-import (
- "context"
- "sync"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage/cache"
- "github.com/opencontainers/go-digest"
-)
-
-type inMemoryBlobDescriptorCacheProvider struct {
- global *mapBlobDescriptorCache
- repositories map[string]*mapBlobDescriptorCache
- mu sync.RWMutex
-}
-
-// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
-// storing blob descriptor data.
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
- return &inMemoryBlobDescriptorCacheProvider{
- global: newMapBlobDescriptorCache(),
- repositories: make(map[string]*mapBlobDescriptorCache),
- }
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
- if _, err := reference.ParseNormalizedNamed(repo); err != nil {
- return nil, err
- }
-
- imbdcp.mu.RLock()
- defer imbdcp.mu.RUnlock()
-
- return &repositoryScopedInMemoryBlobDescriptorCache{
- repo: repo,
- parent: imbdcp,
- repository: imbdcp.repositories[repo],
- }, nil
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- return imbdcp.global.Stat(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
- return imbdcp.global.Clear(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- _, err := imbdcp.Stat(ctx, dgst)
- if err == distribution.ErrBlobUnknown {
-
- if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
- // if the digests differ, set the other canonical mapping
- if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
- return err
- }
- }
-
- // unknown, just set it
- return imbdcp.global.SetDescriptor(ctx, dgst, desc)
- }
-
- // we already know it, do nothing
- return err
-}
-
-// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
-// repository cache. Instances are not thread-safe but the delegated
-// operations are.
-type repositoryScopedInMemoryBlobDescriptorCache struct {
- repo string
- parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
- repository *mapBlobDescriptorCache
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- rsimbdcp.parent.mu.Unlock()
-
- if repo == nil {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
-
- return repo.Stat(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- rsimbdcp.parent.mu.Unlock()
-
- if repo == nil {
- return distribution.ErrBlobUnknown
- }
-
- return repo.Clear(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- if repo == nil {
- // allocate map since we are setting it now.
- var ok bool
- // have to read back value since we may have allocated elsewhere.
- repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
- if !ok {
- repo = newMapBlobDescriptorCache()
- rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
- }
- rsimbdcp.repository = repo
- }
- rsimbdcp.parent.mu.Unlock()
-
- if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
- return err
- }
-
- return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
-}
-
-// mapBlobDescriptorCache provides a simple map-based implementation of the
-// descriptor cache.
-type mapBlobDescriptorCache struct {
- descriptors map[digest.Digest]distribution.Descriptor
- mu sync.RWMutex
-}
-
-var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
-
-func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
- return &mapBlobDescriptorCache{
- descriptors: make(map[digest.Digest]distribution.Descriptor),
- }
-}
-
-func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- if err := dgst.Validate(); err != nil {
- return distribution.Descriptor{}, err
- }
-
- mbdc.mu.RLock()
- defer mbdc.mu.RUnlock()
-
- desc, ok := mbdc.descriptors[dgst]
- if !ok {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
-
- return desc, nil
-}
-
-func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
- mbdc.mu.Lock()
- defer mbdc.mu.Unlock()
-
- delete(mbdc.descriptors, dgst)
- return nil
-}
-
-func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- if err := dgst.Validate(); err != nil {
- return err
- }
-
- if err := cache.ValidateDescriptor(desc); err != nil {
- return err
- }
-
- mbdc.mu.Lock()
- defer mbdc.mu.Unlock()
-
- mbdc.descriptors[dgst] = desc
- return nil
-}
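
The repository-scoped `SetDescriptor` above illustrates a check-after-lock idiom: the per-repo map is allocated lazily, and after taking the parent lock the code re-reads the shared map in case another goroutine allocated it first. The same idiom in isolation (a sketch; `registry` and its fields are hypothetical):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	repos map[string]map[string]int
}

// repo returns the per-repository map, allocating it on first use. The
// lookup happens under the lock, so two goroutines racing on the same name
// cannot both allocate: the loser sees the winner's map.
func (r *registry) repo(name string) map[string]int {
	r.mu.Lock()
	defer r.mu.Unlock()
	m, ok := r.repos[name]
	if !ok {
		m = make(map[string]int)
		r.repos[name] = m
	}
	return m
}

func main() {
	r := &registry{repos: make(map[string]map[string]int)}
	r.repo("library/alpine")["layers"] = 3
	fmt.Println(r.repo("library/alpine")["layers"]) // 3
}
```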
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
deleted file mode 100644
index f22df2b85..000000000
--- a/vendor/github.com/docker/distribution/tags.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package distribution
-
-import (
- "context"
-)
-
-// TagService provides access to information about tagged objects.
-type TagService interface {
- // Get retrieves the descriptor identified by the tag. Some
- // implementations may differentiate between "trusted" tags and
- // "untrusted" tags. If a tag is "untrusted", the mapping will be returned
- // as an ErrTagUntrusted error, with the target descriptor.
- Get(ctx context.Context, tag string) (Descriptor, error)
-
- // Tag associates the tag with the provided descriptor, updating the
- // current association, if needed.
- Tag(ctx context.Context, tag string, desc Descriptor) error
-
- // Untag removes the given tag association
- Untag(ctx context.Context, tag string) error
-
- // All returns the set of tags managed by this tag service
- All(ctx context.Context) ([]string, error)
-
- // Lookup returns the set of tags referencing the given digest.
- Lookup(ctx context.Context, digest Descriptor) ([]string, error)
-}
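
A minimal in-memory implementation makes the contract concrete (a sketch; `memTagService` is a hypothetical name, and real implementations back this interface with registry storage rather than a map):

```go
package memtags

import (
	"context"
	"fmt"
	"sync"

	"github.com/docker/distribution"
)

type memTagService struct {
	mu   sync.RWMutex
	tags map[string]distribution.Descriptor
}

var _ distribution.TagService = (*memTagService)(nil)

func (s *memTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	desc, ok := s.tags[tag]
	if !ok {
		return distribution.Descriptor{}, fmt.Errorf("tag unknown: %s", tag)
	}
	return desc, nil
}

func (s *memTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.tags[tag] = desc
	return nil
}

func (s *memTagService) Untag(ctx context.Context, tag string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.tags, tag)
	return nil
}

func (s *memTagService) All(ctx context.Context) ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]string, 0, len(s.tags))
	for t := range s.tags {
		out = append(out, t)
	}
	return out, nil
}

func (s *memTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	var out []string
	for t, d := range s.tags {
		if d.Digest == digest.Digest {
			out = append(out, t)
		}
	}
	return out, nil
}
```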
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf
deleted file mode 100644
index a249caf26..000000000
--- a/vendor/github.com/docker/distribution/vendor.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
-github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
-github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
-github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490
-github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
-github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
-github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
-github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
-github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
-github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
-github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
-github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
-github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
-github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
-github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
-github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
-github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
-github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
-github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
-github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
-github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
-github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6
-github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
-github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
-github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
-github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
-github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd
-github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
-github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
-github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
-github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
-github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
-github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
-golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
-golang.org/x/net 4876518f9e71663000c348837735820161a42df7
-golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
-golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
-google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
-google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
-google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
-google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
-gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
-gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
-gopkg.in/yaml.v2 v2.2.1
-rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
-github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
-github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
index d1d0434cb..7ca5ab722 100644
--- a/vendor/github.com/docker/docker-credential-helpers/client/client.go
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -16,17 +16,15 @@ func isValidCredsMessage(msg string) error {
if credentials.IsCredentialsMissingServerURLMessage(msg) {
return credentials.NewErrCredentialsMissingServerURL()
}
-
if credentials.IsCredentialsMissingUsernameMessage(msg) {
return credentials.NewErrCredentialsMissingUsername()
}
-
return nil
}
// Store uses an external program to save credentials.
func Store(program ProgramFunc, creds *credentials.Credentials) error {
- cmd := program("store")
+ cmd := program(credentials.ActionStore)
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(creds); err != nil {
@@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {
out, err := cmd.Output()
if err != nil {
- t := strings.TrimSpace(string(out))
-
- if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
err = isValidErr
}
-
- return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+ return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
}
return nil
@@ -50,22 +45,20 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {
// Get executes an external program to get the credentials from a native store.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
- cmd := program("get")
+ cmd := program(credentials.ActionGet)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
- t := strings.TrimSpace(string(out))
-
- if credentials.IsErrCredentialsNotFoundMessage(t) {
+ if credentials.IsErrCredentialsNotFoundMessage(string(out)) {
return nil, credentials.NewErrCredentialsNotFound()
}
- if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
err = isValidErr
}
- return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+ return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
}
resp := &credentials.Credentials{
@@ -81,7 +74,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error
// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
- cmd := program("erase")
+ cmd := program(credentials.ActionErase)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
@@ -99,7 +92,7 @@ func Erase(program ProgramFunc, serverURL string) error {
// List executes a program to list server credentials in the native store.
func List(program ProgramFunc) (map[string]string, error) {
- cmd := program("list")
+ cmd := program(credentials.ActionList)
cmd.Input(strings.NewReader("unused"))
out, err := cmd.Output()
if err != nil {
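
The switch from bare strings to the typed `credentials.Action*` constants is the main change callers see here. Typical use of this client package, as a sketch (it assumes a helper binary such as docker-credential-pass is installed and configured):

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// The ProgramFunc wraps the external helper binary; Get/Store/Erase/List
	// select its sub-command via the credentials.Action* constants.
	p := client.NewShellProgramFunc("docker-credential-pass")

	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if credentials.IsErrCredentialsNotFound(err) {
		fmt.Println("no stored credentials for that registry")
		return
	}
	if err != nil {
		fmt.Println("helper failed:", err)
		return
	}
	fmt.Println("user:", creds.Username)
}
```

When the helper needs extra environment variables, `NewShellProgramFuncWithEnv` (touched in the next file) is the variant to reach for.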
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go
index 8da334306..1936234be 100644
--- a/vendor/github.com/docker/docker-credential-helpers/client/command.go
+++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -1,7 +1,6 @@
package client
import (
- "fmt"
"io"
"os"
"os/exec"
@@ -30,27 +29,26 @@ func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
programCmd := exec.Command(commandName, args...)
- programCmd.Env = os.Environ()
if env != nil {
for k, v := range *env {
- programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
+ programCmd.Env = append(programCmd.Environ(), k+"="+v)
}
}
programCmd.Stderr = os.Stderr
return programCmd
}
-// Shell invokes shell commands to talk with a remote credentials helper.
+// Shell invokes shell commands to talk with a remote credentials-helper.
type Shell struct {
cmd *exec.Cmd
}
-// Output returns responses from the remote credentials helper.
+// Output returns responses from the remote credentials-helper.
func (s *Shell) Output() ([]byte, error) {
return s.cmd.Output()
}
-// Input sets the input to send to a remote credentials helper.
+// Input sets the input to send to a remote credentials-helper.
func (s *Shell) Input(in io.Reader) {
s.cmd.Stdin = in
}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
index da8b594e7..eac551884 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -10,6 +10,20 @@ import (
"strings"
)
+// Action defines the name of an action (sub-command) supported by a
+// credential-helper binary. It is an alias for "string", and mostly
+// for convenience.
+type Action = string
+
+// List of actions (sub-commands) supported by credential-helper binaries.
+const (
+ ActionStore Action = "store"
+ ActionGet Action = "get"
+ ActionErase Action = "erase"
+ ActionList Action = "list"
+ ActionVersion Action = "version"
+)
+
// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
ServerURL string
@@ -43,42 +57,52 @@ func SetCredsLabel(label string) {
CredsLabel = label
}
-// Serve initializes the credentials helper and parses the action argument.
+// Serve initializes the credentials-helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
- var err error
if len(os.Args) != 2 {
- err = fmt.Errorf("Usage: %s ", os.Args[0])
+ _, _ = fmt.Fprintln(os.Stdout, usage())
+ os.Exit(1)
}
- if err == nil {
- err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+ switch os.Args[1] {
+ case "--version", "-v":
+ _ = PrintVersion(os.Stdout)
+ os.Exit(0)
+ case "--help", "-h":
+ _, _ = fmt.Fprintln(os.Stdout, usage())
+ os.Exit(0)
}
- if err != nil {
- fmt.Fprintf(os.Stdout, "%v\n", err)
+ if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
+ _, _ = fmt.Fprintln(os.Stdout, err)
os.Exit(1)
}
}
-// HandleCommand uses a helper and a key to run a credential action.
-func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
- switch key {
- case "store":
+func usage() string {
+ return fmt.Sprintf("Usage: %s ", Name)
+}
+
+// HandleCommand runs a helper to execute a credential action.
+func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
+ switch action {
+ case ActionStore:
return Store(helper, in)
- case "get":
+ case ActionGet:
return Get(helper, in, out)
- case "erase":
+ case ActionErase:
return Erase(helper, in)
- case "list":
+ case ActionList:
return List(helper, out)
- case "version":
+ case ActionVersion:
return PrintVersion(out)
+ default:
+ return fmt.Errorf("%s: unknown action: %s", Name, action)
}
- return fmt.Errorf("Unknown credential action `%s`", key)
}
// Store uses a helper and an input reader to save credentials.
@@ -132,18 +156,17 @@ func Get(helper Helper, reader io.Reader, writer io.Writer) error {
return err
}
- resp := Credentials{
+ buffer.Reset()
+ err = json.NewEncoder(buffer).Encode(Credentials{
ServerURL: serverURL,
Username: username,
Secret: secret,
- }
-
- buffer.Reset()
- if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+ })
+ if err != nil {
return err
}
- fmt.Fprint(writer, buffer.String())
+ _, _ = fmt.Fprint(writer, buffer.String())
return nil
}
@@ -169,8 +192,8 @@ func Erase(helper Helper, reader io.Reader) error {
return helper.Delete(serverURL)
}
-//List returns all the serverURLs of keys in
-//the OS store as a list of strings
+// List returns all the serverURLs of keys in
+// the OS store as a list of strings
func List(helper Helper, writer io.Writer) error {
accts, err := helper.List()
if err != nil {
@@ -179,8 +202,8 @@ func List(helper Helper, writer io.Writer) error {
return json.NewEncoder(writer).Encode(accts)
}
-//PrintVersion outputs the current version.
+// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
- fmt.Fprintln(writer, Version)
+ _, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
return nil
}
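
With `HandleCommand` now keyed on the `Action` type, a complete helper is just this package's four-method `Helper` interface (Add, Delete, Get, List, which this diff leaves unchanged) plus a `Serve` call in `main`. A skeletal, non-persistent sketch (`memHelper` is hypothetical and for illustration only; real helpers talk to an OS keychain):

```go
package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

type memHelper struct{ store map[string][2]string }

func (h *memHelper) Add(c *credentials.Credentials) error {
	h.store[c.ServerURL] = [2]string{c.Username, c.Secret}
	return nil
}

func (h *memHelper) Delete(serverURL string) error {
	delete(h.store, serverURL)
	return nil
}

func (h *memHelper) Get(serverURL string) (string, string, error) {
	v, ok := h.store[serverURL]
	if !ok {
		// Returning the package sentinel keeps the on-wire message that
		// callers like client.Get match against.
		return "", "", credentials.NewErrCredentialsNotFound()
	}
	return v[0], v[1], nil
}

func (h *memHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(h.store))
	for url, v := range h.store {
		out[url] = v[0]
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] and dispatches via HandleCommand.
	credentials.Serve(&memHelper{store: map[string][2]string{}})
}
```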
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
index fe6a5aef4..2283d5a44 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -1,5 +1,10 @@
package credentials
+import (
+ "errors"
+ "strings"
+)
+
const (
// ErrCredentialsNotFound standardizes the not found error, so every helper returns
// the same message and docker can handle it properly.
@@ -21,6 +26,11 @@ func (errCredentialsNotFound) Error() string {
return errCredentialsNotFoundMessage
}
+// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface.
+//
+// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound
+func (errCredentialsNotFound) NotFound() {}
+
// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {
@@ -30,8 +40,8 @@ func NewErrCredentialsNotFound() error {
// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
- _, ok := err.(errCredentialsNotFound)
- return ok
+ var target errCredentialsNotFound
+ return errors.As(err, &target)
}
// IsErrCredentialsNotFoundMessage returns true if the error
@@ -40,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool {
// This function helps to check messages returned by an
// external program via its standard output.
func IsErrCredentialsNotFoundMessage(err string) bool {
- return err == errCredentialsNotFoundMessage
+ return strings.TrimSpace(err) == errCredentialsNotFoundMessage
}
// errCredentialsMissingServerURL represents an error raised
@@ -53,6 +63,12 @@ func (errCredentialsMissingServerURL) Error() string {
return errCredentialsMissingServerURLMessage
}
+// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
+// interface.
+//
+// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
+func (errCredentialsMissingServerURL) InvalidParameter() {}
+
// errCredentialsMissingUsername represents an error raised
// when the credentials object has no username or when no
// username is provided to a credentials operation requiring
@@ -63,6 +79,12 @@ func (errCredentialsMissingUsername) Error() string {
return errCredentialsMissingUsernameMessage
}
+// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
+// interface.
+//
+// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
+func (errCredentialsMissingUsername) InvalidParameter() {}
+
// NewErrCredentialsMissingServerURL creates a new error for
// errCredentialsMissingServerURL.
func NewErrCredentialsMissingServerURL() error {
@@ -78,25 +100,25 @@ func NewErrCredentialsMissingUsername() error {
// IsCredentialsMissingServerURL returns true if the error
// was an errCredentialsMissingServerURL.
func IsCredentialsMissingServerURL(err error) bool {
- _, ok := err.(errCredentialsMissingServerURL)
- return ok
+ var target errCredentialsMissingServerURL
+ return errors.As(err, &target)
}
// IsCredentialsMissingServerURLMessage checks for an
// errCredentialsMissingServerURL in the error message.
func IsCredentialsMissingServerURLMessage(err string) bool {
- return err == errCredentialsMissingServerURLMessage
+ return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage
}
// IsCredentialsMissingUsername returns true if the error
// was an errCredentialsMissingUsername.
func IsCredentialsMissingUsername(err error) bool {
- _, ok := err.(errCredentialsMissingUsername)
- return ok
+ var target errCredentialsMissingUsername
+ return errors.As(err, &target)
}
// IsCredentialsMissingUsernameMessage checks for an
// errCredentialsMissingUsername in the error message.
func IsCredentialsMissingUsernameMessage(err string) bool {
- return err == errCredentialsMissingUsernameMessage
+ return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage
}
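
The move from type assertions to `errors.As` is what lets these predicates keep working once a caller wraps the sentinel with `%w`. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Wrap the sentinel the way a caller might when adding context.
	err := fmt.Errorf("helper invocation failed: %w", credentials.NewErrCredentialsNotFound())

	// A bare type assertion on err would see only the wrapper and miss;
	// errors.As unwraps, so the predicate still matches.
	fmt.Println(credentials.IsErrCredentialsNotFound(err)) // true
}
```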
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
index c2cc3e2e0..84377c263 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -1,4 +1,16 @@
package credentials
-// Version holds a string describing the current version
-const Version = "0.6.3"
+var (
+ // Name is filled at linking time
+ Name = ""
+
+ // Package is filled at linking time
+ Package = "github.com/docker/docker-credential-helpers"
+
+ // Version holds the complete version number. Filled in at linking time.
+ Version = "v0.0.0+unknown"
+
+ // Revision is filled with the VCS (e.g. git) revision being used to build
+ // the program at linking time.
+ Revision = ""
+)
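
These variables follow the usual link-time injection pattern: they are plain `string` vars, so `-X` can overwrite them at build time, e.g. `go build -ldflags "-X github.com/docker/docker-credential-helpers/credentials.Version=v0.8.2"` (the version value is hypothetical). Reading them back is then trivial, as in this sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Without -ldflags, these print the defaults declared above.
	fmt.Println(credentials.Name, credentials.Package, credentials.Version)
}
```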
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index dffacff11..5f93eeb4e 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -1,5 +1,6 @@
-# This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
+# File @generated by hack/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See hack/generate-authors.sh to make modifications.
Aanand Prasad
Aaron Davidson
@@ -7,16 +8,18 @@ Aaron Feng
Aaron Hnatiw
Aaron Huslage
Aaron L. Xu
-Aaron Lehmann
+Aaron Lehmann
Aaron Welch
-Aaron.L.Xu
+Aaron Yoshitake
Abel Muiño
Abhijeet Kasurde
-Abhinandan Prativadi
+Abhinandan Prativadi
Abhinav Ajgaonkar
Abhishek Chanda
Abhishek Sharma
Abin Shahab
+Abirdcfly
+Ada Mancini
Adam Avilla
Adam Dobrawy
Adam Eijdenberg
@@ -25,7 +28,10 @@ Adam Miller
Adam Mills
Adam Pointer
Adam Singer
+Adam Thornton
Adam Walz
+Adam Williams
+AdamKorcz
Addam Hardy
Aditi Rajagopal
Aditya
@@ -51,16 +57,20 @@ Akihiro Suda
Akim Demaille
Akira Koyasu
Akshay Karle
+Akshay Moghe
Al Tobey
alambike
Alan Hoyle
Alan Scherger
Alan Thompson
+Alano Terblanche
Albert Callarisa
Albert Zhang
-Albin Kerouanton
+Albin Kerouanton
+Alec Benson
Alejandro González Hevia
Aleksa Sarai
+Aleksandr Chebotov
Aleksandrs Fadins
Alena Prokharchyk
Alessandro Boch
@@ -72,14 +82,17 @@ Alex Crawford
Alex Ellis
Alex Gaynor
Alex Goodman
+Alex Nordlund
Alex Olshansky
Alex Samorukov
+Alex Stockinger
Alex Warhawk
Alexander Artemenko
Alexander Boyd
Alexander Larsson
Alexander Midlash
-Alexander Morozov
+Alexander Morozov
+Alexander Polakov