diff --git a/go.mod b/go.mod
index 9e6e7a2afa57..42a7fef12961 100644
--- a/go.mod
+++ b/go.mod
@@ -31,7 +31,7 @@ require (
 	github.com/blang/semver/v4 v4.0.0
 	github.com/coreos/stream-metadata-go v0.4.9
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-	github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1
+	github.com/distribution/distribution/v3 v3.0.0
 	github.com/docker/docker v27.3.1+incompatible
 	github.com/fsouza/go-dockerclient v1.12.0
 	github.com/gebn/bmc v0.0.0-20250519231546-bf709e03fe3c
@@ -101,6 +101,7 @@ require (
 	gopkg.in/src-d/go-git.v4 v4.13.1
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
+	helm.sh/helm/v3 v3.18.0
 	k8s.io/api v0.34.1
 	k8s.io/apiextensions-apiserver v0.34.1
 	k8s.io/apimachinery v0.34.1
@@ -133,8 +134,8 @@ require (
 	cloud.google.com/go/compute/metadata v0.8.0 // indirect
 	cloud.google.com/go/iam v1.5.2 // indirect
 	cloud.google.com/go/monitoring v1.24.2 // indirect
+	dario.cat/mergo v1.0.1 // indirect
 	git.sr.ht/~sbinet/gg v0.5.0 // indirect
-	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect
@@ -142,7 +143,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect
 	github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
@@ -153,11 +154,16 @@ require (
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
 	github.com/IBM/platform-services-go-sdk v0.81.0 // indirect
 	github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.3.0 // indirect
+	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
+	github.com/Masterminds/squirrel v1.5.4 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/Microsoft/hnslib v0.1.1 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
@@ -175,11 +181,13 @@ require (
 	github.com/cloudflare/circl v1.6.0 // indirect
 	github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
 	github.com/container-storage-interface/spec v1.9.0 // indirect
+	github.com/containerd/containerd v1.7.27 // indirect
 	github.com/containerd/containerd/api v1.8.0 // indirect
 	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
-	github.com/containerd/ttrpc v1.2.6 // indirect
+	github.com/containerd/platforms v0.2.1 // indirect
+	github.com/containerd/ttrpc v1.2.7 // indirect
 	github.com/containerd/typeurl/v2 v2.2.2 // indirect
 	github.com/coreos/go-oidc v2.3.0+incompatible // indirect
 	github.com/coreos/go-semver v0.3.1 // indirect
@@ -196,9 +204,11 @@ require (
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
 	github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.9.11+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/fatih/camelcase v1.0.0 // indirect
+	github.com/fatih/color v1.18.0 // indirect
 	github.com/felixge/fgprof v0.9.4 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.9.0 // indirect
@@ -207,6 +217,7 @@ require (
 	github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-fonts/liberation v0.3.1 // indirect
+	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
 	github.com/go-jose/go-jose/v4 v4.1.1 // indirect
 	github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
@@ -225,6 +236,7 @@ require (
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.26.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gocarina/gocsv v0.0.0-20231116093920-b87c2d0e983a // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gofrs/uuid v4.4.0+incompatible // indirect
@@ -243,21 +255,26 @@ require (
 	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
 	github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
 	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/gosuri/uitable v0.0.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-checkpoint v0.5.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/huandu/xstrings v1.5.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/jmoiron/sqlx v1.4.0 // indirect
 	github.com/jonboulle/clockwork v0.5.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -265,6 +282,8 @@ require (
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
+	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/lestrrat-go/jspointer v0.0.0-20181205001929-82fadba7561c // indirect
 	github.com/lestrrat-go/jsref v0.0.0-20181205001954-1b590508f37d // indirect
@@ -272,6 +291,7 @@ require (
 	github.com/lestrrat-go/jsval v0.0.0-20181205002323-20277e9befc0 // indirect
 	github.com/lestrrat-go/pdebug v0.0.0-20200204225717-4d6bd78da58d // indirect
 	github.com/lestrrat-go/structinfo v0.0.0-20190212233437-acd51874663b // indirect
+	github.com/lib/pq v1.10.9 // indirect
 	github.com/libopenstorage/openstorage v1.0.0 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
@@ -279,6 +299,7 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-ieproxy v0.0.11 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/microsoft/kiota-abstractions-go v1.9.3 // indirect
 	github.com/microsoft/kiota-authentication-azure-go v1.3.0 // indirect
 	github.com/microsoft/kiota-http-go v1.5.2 // indirect
@@ -288,9 +309,11 @@ require (
 	github.com/microsoft/kiota-serialization-text-go v1.1.2 // indirect
 	github.com/microsoftgraph/msgraph-sdk-go-core v1.3.2 // indirect
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
@@ -298,7 +321,7 @@ require (
 	github.com/moby/sys/sequential v0.6.0 // indirect
 	github.com/moby/sys/user v0.4.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
@@ -323,12 +346,14 @@ require (
 	github.com/prometheus/procfs v0.17.0 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/rubenv/sql-migrate v1.8.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/smartystreets/assertions v1.1.0 // indirect
 	github.com/soheilhy/cmux v0.1.5 // indirect
 	github.com/spf13/afero v1.10.0 // indirect
-	github.com/spf13/cast v1.5.1 // indirect
+	github.com/spf13/cast v1.7.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
 	github.com/src-d/gcfg v1.4.0 // indirect
@@ -340,6 +365,8 @@ require (
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
 	github.com/zclconf/go-cty v1.16.2 // indirect
@@ -400,6 +427,7 @@ require (
 	k8s.io/kubelet v0.31.1 // indirect
 	k8s.io/mount-utils v0.0.0 // indirect
 	k8s.io/sample-apiserver v0.0.0 // indirect
+	oras.land/oras-go/v2 v2.5.0 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
 	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 // indirect
 	sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 // indirect
@@ -410,6 +438,7 @@ require (
 )

 replace (
+	github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1
 	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
 	k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20251017123720-96593f323733
diff --git a/go.sum b/go.sum
index d7ccdbad97de..0d74642660b5 100644
--- a/go.sum
+++ b/go.sum
@@ -61,9 +61,11 @@ cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsL cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU= cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo= git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE= git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8= @@ -120,8 +122,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= @@ -156,7 +158,11 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod 
h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= @@ -177,8 +183,16 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1 github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -190,6 +204,8 @@ github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNx github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2 h1:x8Brv0YNEe6jY3V/hQglIG2nd8g5E2Zj5ubGKkPQctQ= github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= @@ -224,6 +240,14 @@ github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -254,6 +278,8 @@ github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD9 github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/container-storage-interface/spec v1.9.0 h1:zKtX4STsq31Knz3gciCYCi1SXtO2HJDecIjDVboYavY= github.com/container-storage-interface/spec v1.9.0/go.mod h1:ZfDu+3ZRyeVqxZM0Ds19MVLkN2d1XJ5MAfi1L3VjlT0= +github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= +github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= @@ -262,8 +288,10 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/ttrpc v1.2.6 h1:zG+Kn5EZ6MUYCS1t2Hmt2J4tMVaLSFEJVOraDQwNPC4= -github.com/containerd/ttrpc v1.2.6/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/ttrpc v1.2.7 
h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.2 h1:3jN/k2ysKuPCsln5Qv8bzR9cxal8XjkxPogJfSNO31k= github.com/containerd/typeurl/v2 v2.2.2/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0= @@ -298,6 +326,10 @@ github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFT github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= @@ -326,8 +358,8 @@ github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfU github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= -github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= @@ -344,8 +376,10 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= 
-github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= @@ -383,6 +417,8 @@ github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs= @@ -427,8 +463,12 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= @@ -487,6 +527,8 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -561,11 +603,15 @@ github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfg github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= @@ -584,6 +630,8 @@ github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplb github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -595,6 +643,8 @@ 
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -609,6 +659,8 @@ github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -621,6 +673,8 @@ github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEs github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -646,6 +700,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= 
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -684,6 +740,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= @@ -701,6 +761,8 @@ github.com/lestrrat-go/structinfo v0.0.0-20190212233437-acd51874663b h1:YUFRoeHK github.com/lestrrat-go/structinfo v0.0.0-20190212233437-acd51874663b/go.mod h1:s2U6PowV3/Jobkx/S9d0XiPwOzs6niW3DIouw+7nZC8= github.com/lestrrat/go-jsschema v0.0.0-20181205002244-5c81c58ffcc3 h1:UaOmzcaCH2ziMcSbQFBq/3Iuz/E/Jr/GOGtV80jpFII= github.com/lestrrat/go-jsschema v0.0.0-20181205002244-5c81c58ffcc3/go.mod h1:mwYHT2Axb+cWge5Mj02uHhpi6Gfag30trq7E7AOMAdY= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -723,6 +785,9 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-sqlite3 v1.14.30 h1:bVreufq3EAIG1Quvws73du3/QgdeZ3myglJlrzSYYCY= 
github.com/mattn/go-sqlite3 v1.14.30/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/metallb/frr-k8s v0.0.15 h1:6M3UGhovX1EFoaSGjrRD7djUAx3w2I+g81FH8OVtHkM= @@ -746,9 +811,13 @@ github.com/microsoftgraph/msgraph-sdk-go v1.81.0/go.mod h1:1V9jKcRL+Czs3u8gI2XjU github.com/microsoftgraph/msgraph-sdk-go-core v1.3.2 h1:5jCUSosTKaINzPPQXsz7wsHWwknyBmJSu8+ZWxx3kdQ= github.com/microsoftgraph/msgraph-sdk-go-core v1.3.2/go.mod h1:iD75MK3LX8EuwjDYCmh0hkojKXK6VKME33u4daCo3cE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -762,6 +831,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= @@ -776,8 +847,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -900,6 +971,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -917,6 +990,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= @@ -932,8 +1007,8 @@ github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2 github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= -github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron/v3 v3.0.1 
h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -945,6 +1020,8 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -952,6 +1029,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -969,8 +1048,8 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= @@ -1029,6 +1108,7 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -1045,6 +1125,12 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= @@ -1094,8 +1180,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glB go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= @@ -1578,6 +1664,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= 
 gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+helm.sh/helm/v3 v3.18.0 h1:ItOAm3Quo0dus3NUHjs+lluqWWEIO7xrSW+zKWCrvlw=
+helm.sh/helm/v3 v3.18.0/go.mod h1:43QHS1W97RcoFJRk36ZBhHdTfykqBlJdsWp3yhzdq8w=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1596,6 +1684,8 @@ k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 h1:liMHz39T5dJO1aOKHLvwaC
 k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
+oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/pkg/testsuites/standard_suites.go b/pkg/testsuites/standard_suites.go
index 08d77ea8ba35..2b9c1a67e887 100644
--- a/pkg/testsuites/standard_suites.go
+++ b/pkg/testsuites/standard_suites.go
@@ -462,6 +462,17 @@ var staticSuites = []ginkgo.TestSuite{
 		TestTimeout: 120 * time.Minute,
 		ClusterStabilityDuringTest: ginkgo.Disruptive,
 	},
+	{
+		Name: "openshift/dra-gpu-validation",
+		Description: templates.LongDesc(`
+		This test suite runs tests that validate GPU workloads using DRA structured parameters and the NVIDIA DRA driver.
+ `), + Qualifiers: []string{ + `name.contains("[Suite:openshift/dra-gpu-validation]") && !name.contains("[Skipped]")`, + }, + Parallelism: 1, + TestTimeout: 60 * time.Minute, + }, } func withExcludedTestsFilter(baseExpr string) string { diff --git a/test/extended/dra/OWNERS b/test/extended/dra/OWNERS new file mode 100644 index 000000000000..36c4c1e54323 --- /dev/null +++ b/test/extended/dra/OWNERS @@ -0,0 +1,12 @@ +approvers: +- mrunalp +- harche +- rphillips +- haircommander +- tkashem +reviewers: +- mrunalp +- harche +- rphillips +- haircommander +- tkashem diff --git a/test/extended/dra/common_spec.go b/test/extended/dra/common_spec.go new file mode 100644 index 000000000000..a361c320ca8e --- /dev/null +++ b/test/extended/dra/common_spec.go @@ -0,0 +1,115 @@ +package dra + +import ( + "context" + "fmt" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + helper "github.com/openshift/origin/test/extended/dra/helper" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" +) + +// expected to run on both the example DRA driver and the nvidia DRA driver +// one pod, one container, requests a distinct gpu +type commonSpec struct { + f *framework.Framework + // driver name + class string + newContainer func(name string) corev1.Container + // device names in the resourceslices advertised by the driver + deviceNamesAdvertised []string + // the node onto which the pod is expected to run + node *corev1.Node +} + +func (spec commonSpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + + template := &resourceapi.ResourceClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "single-gpu", + }, + } + template.Spec.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + { + Name: "gpu", + Exactly: &resourceapi.ExactDeviceRequest{DeviceClassName: spec.class}, + }, + } + + // one pod, one container + pod := helper.NewPod(namespace, "pod") + ctr := spec.newContainer("ctr") + ctr.Resources.Claims = []corev1.ResourceClaim{ + { + Name: "gpu", + }, + } + pod.Spec.Containers = append(pod.Spec.Containers, ctr) + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "gpu", + ResourceClaimTemplateName: ptr.To(template.Name), + }, + } + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + + g.By("creating external claim and pod") + resource := spec.f.ClientSet.ResourceV1() + _, err := resource.ResourceClaimTemplates(namespace).Create(ctx, template, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + core := spec.f.ClientSet.CoreV1() + pod, err = core.Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + t.Logf("pod in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pod)) + + client := spec.f.ClientSet.ResourceV1().ResourceClaims(namespace) + result, err := client.List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(result)) + }) + + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err = 
e2epodutil.WaitForPodRunningInNamespace(ctx, spec.f.ClientSet, pod) + o.Expect(err).To(o.BeNil()) + + pod, err = core.Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := helper.GetResourceClaimFor(ctx, spec.f.ClientSet, pod) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + o.Expect(claim.Status.Allocation).NotTo(o.BeNil()) + o.Expect(len(claim.Status.Allocation.Devices.Results)).To(o.Equal(1)) + + allocation := claim.Status.Allocation.Devices.Results[0] + o.Expect(allocation.Request).To(o.Equal("gpu")) + o.Expect(allocation.Driver).To(o.Equal(spec.class)) + o.Expect(spec.deviceNamesAdvertised).To(o.ContainElement(allocation.Device)) + + g.By(fmt.Sprintf("retrieving logs for pod %s/%s", pod.Namespace, pod.Name)) + logs, err := helper.GetLogs(ctx, spec.f.ClientSet, pod.Namespace, pod.Name, ctr.Name) + o.Expect(err).To(o.BeNil()) + t.Logf("logs from container: %s\n%s\n", ctr.Name, logs) +} diff --git a/test/extended/dra/cuda_ipc.go b/test/extended/dra/cuda_ipc.go new file mode 100644 index 000000000000..d7388c6b981f --- /dev/null +++ b/test/extended/dra/cuda_ipc.go @@ -0,0 +1,543 @@ +package dra + +import ( + "context" + "fmt" + "strings" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// two whole gpus, one we label as the producer, and the other consumer +// the producer pod sees only the 'producer' gpu +// the consumer pod sees both the 'producer' and the 'consumer' gpus +// the consumer pod is passed the UUID of the gpu which it will use +// by calling cudaSetDevice +// no need to use 'CUDA_VISIBLE_DEVICES' or 'NVIDIA_VISIBLE_DEVICES' +type shareDataWithCUDAIPC struct { + f *framework.Framework + class string + node *corev1.Node + driver *nvidia.NvidiaDRADriverGPU + // the UUID of the gpu on which the producer and the consumer should run + producer, consumer string +} + +func (spec shareDataWithCUDAIPC) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + clientset := spec.f.ClientSet + + newRequest := func(name, gpuUUID string) resourceapi.DeviceRequest { + return resourceapi.DeviceRequest{ + Name: name, + Exactly: &resourceapi.ExactDeviceRequest{ + DeviceClassName: spec.class, + Selectors: []resourceapi.DeviceSelector{ + { + CEL: &resourceapi.CELDeviceSelector{ + Expression: fmt.Sprintf("device.attributes['%s'].uuid == \"%s\"", spec.class, gpuUUID), + }, + }, + }, + }, + } + } + // a claim that allocates two whole gpus, named as 'producer' and 'consumer' + // producer and consumer should match the uuids respectively + claim := &resourceapi.ResourceClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "two-gpus", + }, + } + claim.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + newRequest("producer", spec.producer), + newRequest("consumer", spec.consumer), + } + + // producer pod + producer := helper.NewPod(namespace, "producer") + producer.Spec.Containers = []corev1.Container{producerCtr()} + // producer pod has access to the 'producer' gpu only + 
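// how the wiring works: Resources.Claims[].Name points at an entry in + // pod.Spec.ResourceClaims, and Request names one DeviceRequest within the + // referenced claim, e.g. {Name: "gpu", Request: "producer"}; an empty + // Request exposes every device allocated by the claim to the container, + // which the consumer below relies on. +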
producer.Spec.Containers[0].Resources.Claims = []corev1.ResourceClaim{{Name: "gpu", Request: "producer"}} + producer.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "gpu", + ResourceClaimName: ptr.To(claim.Name), + }, + } + shared(producer) + + // consumer pod + consumer := helper.NewPod(namespace, "consumer") + // nvidia device driver publishes a uuid of a gpu in this format 'GPU-{uuid}' + // this is what we see in nvidia-smi, and ResourceSlices, when a CUDA + // application enumerates the gpu devices using cudaGetDeviceProperties + // it sees the uuid only. + uuid, _ := strings.CutPrefix(spec.consumer, "GPU-") + consumer.Spec.Containers = []corev1.Container{consumerCtr(uuid)} + // TODO: although the consumer pod runs its workload on the consumer gpu, it + // needs access to the producer gpu for IPC, otherwise we see the + // following error from the consumer when it tries to open the + // handle using cudaIpcOpenMemHandle + // ERROR opening IPC handle: invalid argument + // + // leaving Request empty ensures that the consumer has access + // to all allocated devices in the claim. + consumer.Spec.Containers[0].Resources.Claims = []corev1.ResourceClaim{{Name: "gpu", Request: ""}} + consumer.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "gpu", + ResourceClaimName: ptr.To(claim.Name), + }, + } + shared(consumer) + + t.Logf("producer uuid: %s consumer uuid: %s", spec.producer, spec.consumer) + + o.Expect(helper.EnsureNamespaceLabel(ctx, clientset, namespace, "pod-security.kubernetes.io/enforce", "privileged")).To(o.BeNil()) + + g.By("creating external claim and pod(s)") + t.Logf("creating resource claim: \n%s\n", framework.PrettyPrintJSON(claim)) + _, err := clientset.ResourceV1().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + t.Logf("creating producer pod: \n%s\n", framework.PrettyPrintJSON(producer)) + producer, err = clientset.CoreV1().Pods(namespace).Create(ctx, producer, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + t.Logf("creating consumer pod: \n%s\n", framework.PrettyPrintJSON(consumer)) + consumer, err = clientset.CoreV1().Pods(namespace).Create(ctx, consumer, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("pods in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pods)) + + claims, err := clientset.ResourceV1().ResourceClaims(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(claims)) + }) + + g.By(fmt.Sprintf("waiting for the producer pod %s/%s to be running", producer.Namespace, producer.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, clientset, producer) + o.Expect(err).To(o.BeNil()) + + g.By(fmt.Sprintf("waiting for the consumer pod %s/%s to be running", consumer.Namespace, consumer.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, clientset, consumer) + o.Expect(err).To(o.BeNil()) + + // verify that the producer and the consumer see the gpus as expected + for _, v := range []struct { + pod *corev1.Pod + gpus []string + }{ + {pod: producer, gpus: []string{spec.producer}}, + {pod: consumer, gpus: []string{spec.producer, spec.consumer}}, + } { + ctr := v.pod.Spec.Containers[0] +
g.By(fmt.Sprintf("running nvidia-smi in pod %s/%s, container: %s", v.pod.Namespace, v.pod.Name, ctr.Name)) + gpus, err := nvidia.QueryGPUUsedByContainer(ctx, t, spec.f, v.pod.Name, v.pod.Namespace, ctr.Name) + o.Expect(err).To(o.BeNil()) + o.Expect(gpus.UUIDs()).To(o.ConsistOf(v.gpus)) + } + + g.By("retrieving pod logs") + for _, pod := range []*corev1.Pod{producer, consumer} { + for _, ctr := range pod.Spec.Containers { + logs, err := helper.GetLogs(ctx, clientset, pod.Namespace, pod.Name, ctr.Name) + o.Expect(err).To(o.BeNil()) + t.Logf("logs from pod: %s, container: %s\n%s\n", pod.Name, ctr.Name, logs) + } + } +} + +func shared(pod *corev1.Pod) { + pod.Spec.HostIPC = true + pod.Spec.HostPID = true + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + pod.Spec.Volumes = []corev1.Volume{ + { + Name: "shared-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/tmp/cuda-ipc-shared", + Type: ptr.To(corev1.HostPathDirectoryOrCreate), + }, + }, + }, + } + + ctr := &pod.Spec.Containers[0] + ctr.VolumeMounts = append(ctr.VolumeMounts, corev1.VolumeMount{Name: "shared-volume", MountPath: "/shared"}) + ctr.SecurityContext = &corev1.SecurityContext{ + Privileged: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{"IPC_LOCK"}, + }, + } + + // both pods should create the file '/tmp/ready' when they have successfully + // handled CUDA IPC, and before they go into a permanent sleep. + ctr.StartupProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"cat", "/tmp/ready"}, + }, + }, + InitialDelaySeconds: 5, + FailureThreshold: 12, + PeriodSeconds: 10, + } +} + +func producerCtr() corev1.Container { + return corev1.Container{ + Name: "producer", + Image: "nvidia/cuda:12.4.1-devel-ubuntu22.04", + Command: []string{"/bin/bash", "-c"}, + Env: []corev1.EnvVar{ + // ideally we shouldn't have to set this env var, usually nvidia + // dra driver or ctk will inject this if necessary. + // {Name: "CUDA_VISIBLE_DEVICES", Value: "0"}, + }, + Args: []string{` + echo "Producer: Listing available GPUs with nvidia-smi..." + nvidia-smi -L + echo "Producer: GPU UUIDs:" + nvidia-smi --query-gpu=uuid --format=csv,noheader + echo "Producer: GPU information complete." 
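+ # a sketch of the producer flow implemented below (it assumes the /shared + # hostPath mount wired up by shared()): + # 1. allocate 1MiB on the gpu and fill it with the pattern 'index + 42' + # 2. export the allocation with cudaIpcGetMemHandle and write the handle + #    to /shared/cuda_ipc_handle.dat for the consumer to pick up + # 3. touch /tmp/ready for the startup probe, then sleep forever so the + #    gpu allocation stays alive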
+ echo "" + + cat > /tmp/producer.cu << 'EOF' + #include <stdio.h> + #include <stdlib.h> + #include <unistd.h> + #include <cuda_runtime.h> + + int main() { + void* devPtr; + cudaIpcMemHandle_t handle; + int deviceCount; + + printf("Producer: Initializing CUDA...\n"); + fflush(stdout); + + // Check available GPUs + cudaError_t err = cudaGetDeviceCount(&deviceCount); + if (err != cudaSuccess) { + printf("ERROR getting device count: %s\n", cudaGetErrorString(err)); + return 1; + } + printf("Producer: Found %d GPU(s)\n", deviceCount); + + // List all available GPUs + for (int i = 0; i < deviceCount; i++) { + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, i); + printf("Producer: GPU %d: %s\n", i, prop.name); + } + fflush(stdout); + + err = cudaSetDevice(0); + if (err != cudaSuccess) { + printf("ERROR setting CUDA device: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Allocating GPU memory on device 0...\n"); + fflush(stdout); + err = cudaMalloc(&devPtr, 1024 * 1024); // 1MB + if (err != cudaSuccess) { + printf("ERROR allocating GPU memory: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Writing test data to GPU memory...\n"); + fflush(stdout); + int* hostData = (int*)malloc(1024 * 1024); + for (int i = 0; i < 256 * 1024; i++) { + hostData[i] = i + 42; // Simple pattern: index + 42 + } + err = cudaMemcpy(devPtr, hostData, 1024 * 1024, cudaMemcpyHostToDevice); + if (err != cudaSuccess) { + printf("ERROR copying data to GPU: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Creating IPC handle...\n"); + fflush(stdout); + err = cudaIpcGetMemHandle(&handle, devPtr); + if (err != cudaSuccess) { + printf("ERROR creating IPC handle: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Writing handle to shared volume...\n"); + fflush(stdout); + FILE* f = fopen("/shared/cuda_ipc_handle.dat", "wb"); + if (!f) { + printf("ERROR: Could not open handle file for writing\n"); + return 1; + } + fwrite(&handle, sizeof(handle), 1, f); + fclose(f); + + printf("Producer: Success! Memory contains values 42, 43, 44, 45, 46...\n"); + printf("Producer: Hanging infinitely to keep GPU memory alive...\n"); + fflush(stdout); + + // signal that the producer has created the IPC handle + FILE* ready = fopen("/tmp/ready", "w"); + if (!ready) { + printf("ERROR: Could not open liveness file for writing\n"); + return 1; + } + fclose(ready); + + // Hang forever to keep the GPU memory allocated + while (1) { + sleep(3600); + } + + return 0; + } +EOF + + echo "Compiling producer..." + nvcc /tmp/producer.cu -o /tmp/producer + echo "Starting producer..." + /tmp/producer + +`}, + } +} + +func consumerCtr(uuid string) corev1.Container { + return corev1.Container{ + Name: "consumer", + Image: "nvidia/cuda:12.4.1-devel-ubuntu22.04", + Command: []string{"/bin/bash", "-c"}, + // this dictates to the consumer which gpu it should set using cudaSetDevice + // since it has access to the producer gpu as well + Env: []corev1.EnvVar{ + {Name: "GPU_UUID", Value: uuid}, + }, + Args: []string{` + echo "Consumer: Waiting for producer to create handle..." + + # Wait for the handle file to be created + while [ ! -f /shared/cuda_ipc_handle.dat ]; do + echo "Consumer: Waiting for handle file..." + sleep 2 + done + + echo "Consumer: Handle file found!" + echo "Consumer: Listing available GPUs with nvidia-smi..." + nvidia-smi -L + echo "Consumer: GPU UUIDs:" + nvidia-smi --query-gpu=uuid --format=csv,noheader + echo "Consumer: GPU information complete." 
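+ # a sketch of the consumer flow implemented below: + # 1. poll /shared/cuda_ipc_handle.dat until the producer has written it + # 2. match GPU_UUID against cudaGetDeviceProperties().uuid to find the + #    right device index, then cudaSetDevice to it + # 3. open the producer's allocation with cudaIpcOpenMemHandle, verify the + #    'index + 42' pattern, and touch /tmp/ready for the startup probe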
+ echo "" + sleep 2 + + cat > /tmp/consumer.cu << 'EOF' + #include <stdio.h> + #include <stdlib.h> + #include <string.h> + #include <unistd.h> + + // Helper function to print a cudaUUID_t for verification + void print_uuid(const cudaUUID_t uuid) { + for (int i = 0; i < 16; i++) { + printf("%02x", (unsigned char)uuid.bytes[i]); + if (i == 3 || i == 5 || i == 7 || i == 9) { + printf("-"); + } + } + printf("\n"); + } + + int convert_to_uuid(const char* src, unsigned char* dest) { + // Use sscanf to parse the UUID string with hyphens + int bytes_read; + bytes_read = sscanf(src, + "%02hhx%02hhx%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx", + &dest[0], &dest[1], &dest[2], &dest[3], + &dest[4], &dest[5], + &dest[6], &dest[7], + &dest[8], &dest[9], + &dest[10], &dest[11], &dest[12], &dest[13], &dest[14], &dest[15]); + return bytes_read; + } + + // Helper function to compare two cudaUUID_t structures + bool uuid_equal(cudaUUID_t a, cudaUUID_t b) { + for (int i = 0; i < sizeof(a.bytes); i++) { + if (a.bytes[i] != b.bytes[i]) { + return false; + } + } + return true; + } + + int main() { + const char* uuid_env_var = getenv("GPU_UUID"); + if (uuid_env_var == NULL) { + printf("Error: Environment variable 'GPU_UUID' not set.\n"); + return 1; + } + printf("Read GPU UUID from env: %s\n", uuid_env_var); + + cudaUUID_t uuid; + // A pointer to the byte array inside the cudaUUID_t struct + unsigned char* uuid_bytes = (unsigned char*)uuid.bytes; + int bytes_read; + bytes_read = convert_to_uuid(uuid_env_var, uuid_bytes); + if (bytes_read != 16) { + printf("Error: Failed to parse UUID string. Expected 16 bytes, but read %d.\n", bytes_read); + return 1; + } + + printf("Converted to cudaUUID_t: "); + print_uuid(uuid); + printf("Consumer: Initializing CUDA...\n"); + fflush(stdout); + + // find the matching GPU + int deviceCount; + cudaError_t err = cudaGetDeviceCount(&deviceCount); + if (err != cudaSuccess) { + printf("ERROR getting device count: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: Found %d GPU(s), finding the matching device index\n", deviceCount); + int device_index = -1; + for (int i = 0; i < deviceCount; i++) { + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, i); + int match = memcmp(prop.uuid.bytes, uuid.bytes, sizeof(uuid.bytes)); + printf("Consumer: GPU index: %d, name: %s, match: %d, uuid: ", i, prop.name, match == 0); + print_uuid(prop.uuid); + if (match == 0) { + device_index = i; + break; + } + } + if (device_index == -1) { + printf("Could not find a CUDA device matching the UUID.\n"); + return 1; + } + + err = cudaSetDevice(device_index); + if (err != cudaSuccess) { + printf("ERROR setting CUDA device: %s\n", cudaGetErrorString(err)); + return 1; + } + + void* devPtr; + cudaIpcMemHandle_t handle; + + printf("Consumer: Reading IPC handle from shared volume...\n"); + fflush(stdout); + FILE* f = fopen("/shared/cuda_ipc_handle.dat", "rb"); + if (!f) { + printf("ERROR: Handle file not found\n"); + return 1; + } + fread(&handle, sizeof(handle), 1, f); + fclose(f); + + printf("Consumer: Opening IPC memory handle...\n"); + fflush(stdout); + err = cudaIpcOpenMemHandle(&devPtr, handle, cudaIpcMemLazyEnablePeerAccess); + if (err != cudaSuccess) { + printf("ERROR opening IPC handle: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: Successfully opened shared GPU memory!\n"); + fflush(stdout); + + // Read the data once + int* hostData = (int*)malloc(1024 * sizeof(int)); + err = cudaMemcpy(hostData, devPtr, 1024 * sizeof(int), 
cudaMemcpyDeviceToHost); + if (err != cudaSuccess) { + printf("ERROR reading GPU memory: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: First 10 values from shared memory: "); + for (int i = 0; i < 10; i++) { + printf("%d ", hostData[i]); + } + printf("\n"); + fflush(stdout); + + // Verify expected pattern (index + 42) + bool correct = true; + for (int i = 0; i < 1024; i++) { + if (hostData[i] != i + 42) { + correct = false; + break; + } + } + + if (correct) { + printf("Consumer: ✓ Data verification PASSED!\n"); + } else { + printf("Consumer: ✗ Data verification FAILED!\n"); + } + + printf("Consumer: Success! Hanging infinitely...\n"); + fflush(stdout); + + // signal that the consumer has read the data through the IPC handle + FILE* ready = fopen("/tmp/ready", "w"); + if (!ready) { + printf("ERROR: Could not open liveness file for writing\n"); + return 1; + } + fclose(ready); + + // Hang forever + while (1) { + sleep(3600); + } + + return 0; + } +EOF + + echo "Compiling consumer..." + nvcc /tmp/consumer.cu -o /tmp/consumer + echo "Starting consumer..." + /tmp/consumer + +`}, + } +} diff --git a/test/extended/dra/distinct_gpus.go b/test/extended/dra/distinct_gpus.go new file mode 100644 index 000000000000..a9574aa06b13 --- /dev/null +++ b/test/extended/dra/distinct_gpus.go @@ -0,0 +1,164 @@ +package dra + +import ( + "context" + "fmt" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// two pods, one container each, and each container uses two distinct gpus +type distinctGPUsSpec struct { + f *framework.Framework + class string + // the node onto which the pods are expected to run + node *corev1.Node + // UUID of the two distinct gpus + gpu0, gpu1 string +} + +func (spec distinctGPUsSpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + newResourceClaim := func(name, request, gpu string) *resourceapi.ResourceClaim { + claim := &resourceapi.ResourceClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + claim.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + { + Name: request, + Exactly: &resourceapi.ExactDeviceRequest{ + DeviceClassName: spec.class, + Selectors: []resourceapi.DeviceSelector{ + { + CEL: &resourceapi.CELDeviceSelector{ + Expression: fmt.Sprintf("device.attributes['%s'].uuid == \"%s\"", spec.class, gpu), + }, + }, + }, + }, + }, + } + return claim + } + newContainer := func(name string) corev1.Container { + return corev1.Container{ + Name: name, + Image: "nvidia/cuda:12.4.1-devel-ubuntu22.04", + Command: []string{"bash", "-c"}, + Args: []string{"nvidia-smi -L; trap 'exit 0' TERM; sleep 9999 & wait"}, + } + } + newPod := func(podName, ctrName, claim, request string) *corev1.Pod { + pod := helper.NewPod(namespace, podName) + + ctr := newContainer(ctrName) + ctr.Resources.Claims = []corev1.ResourceClaim{ + { + Name: "gpu", + Request: request, + }, + } + pod.Spec.Containers = append(pod.Spec.Containers, ctr) + + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "gpu", + ResourceClaimName: ptr.To(claim), + }, + } + 
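// gpu worker nodes are often tainted with nvidia.com/gpu:NoSchedule (by + // cluster setup or autoscaling tooling, for example), so every test pod + // carries a matching toleration. +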
pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + return pod + } + + g.By("creating resourceclaims") + for _, claim := range []*resourceapi.ResourceClaim{ + newResourceClaim("pod-a", "nvidia-gpu-0", spec.gpu0), + newResourceClaim("pod-b", "nvidia-gpu-1", spec.gpu1), + } { + t.Logf("creating resourceclaim: \n%s\n", framework.PrettyPrintJSON(claim)) + _, err := spec.f.ClientSet.ResourceV1().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + } + + pods := []*corev1.Pod{} + for _, want := range []*corev1.Pod{ + newPod("pod-a", "a-ctr", "pod-a", "nvidia-gpu-0"), + newPod("pod-b", "b-ctr", "pod-b", "nvidia-gpu-1"), + } { + t.Logf("creating pod: \n%s\n", framework.PrettyPrintJSON(want)) + pod, err := spec.f.ClientSet.CoreV1().Pods(namespace).Create(ctx, want, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + pods = append(pods, pod) + } + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + pods, err := spec.f.ClientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("pods in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pods)) + + claims, err := spec.f.ClientSet.ResourceV1().ResourceClaims(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(claims)) + }) + + want := map[string]bool{ + spec.gpu0: true, + spec.gpu1: true, + } + for _, pod := range pods { + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err := e2epodutil.WaitForPodRunningInNamespace(ctx, spec.f.ClientSet, pod) + o.Expect(err).To(o.BeNil()) + + // get the latest revision of the pod so we can verify its expected state + pod, err = spec.f.ClientSet.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + + // the pods should run on the expected node + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := spec.f.ClientSet.ResourceV1().ResourceClaims(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + o.Expect(claim.Status.Allocation).NotTo(o.BeNil()) + o.Expect(len(claim.Status.Allocation.Devices.Results)).To(o.Equal(1)) + + result := claim.Status.Allocation.Devices.Results[0] + o.Expect(result.Request).To(o.Equal(pod.Spec.Containers[0].Resources.Claims[0].Request)) + o.Expect(result.Device).ToNot(o.BeEmpty()) + + t.Logf("pod %s/%s has been allocated nvidia gpu: %s", pod.Namespace, pod.Name, result.Device) + + ctr := pod.Spec.Containers[0] + g.By(fmt.Sprintf("running nvidia-smi in pod %s/%s, container: %s", pod.Namespace, pod.Name, ctr.Name)) + gpus, err := nvidia.QueryGPUUsedByContainer(ctx, t, spec.f, pod.Name, pod.Namespace, ctr.Name) + o.Expect(err).To(o.BeNil()) + o.Expect(len(gpus)).To(o.Equal(1)) + + o.Expect(want).To(o.HaveKey(gpus[0].UUID)) + delete(want, gpus[0].UUID) + } + + o.Expect(len(want)).To(o.Equal(0)) +} diff --git a/test/extended/dra/dra.go b/test/extended/dra/dra.go new file mode 100644 index 000000000000..9b0754734051 --- /dev/null +++ b/test/extended/dra/dra.go @@ -0,0 +1,610 @@ +package dra + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + draexample 
"github.com/openshift/origin/test/extended/dra/example" + helper "github.com/openshift/origin/test/extended/dra/helper" + "github.com/openshift/origin/test/extended/dra/nvidia" + exutil "github.com/openshift/origin/test/extended/util" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + clientgodynamic "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +const ( + // the test applies this label to the worker node that has been + // selected, the example driver will be installed on this node + exampleNodeLabel = "dra.e2e.openshift.io/example" + + // NFD will apply this label to the worker node if + // an Nvidia GPU is present on the node + nvidiaGPU = "feature.node.kubernetes.io/pci-10de.present" + + // the test applies this label to the gpu worker node that has been + // selected, the nvidia dra driver will be installed on this node + gpuNodeLabel = "dra.e2e.openshift.io/nvidia-dra-driver" + + // this is the node label that bears the RHCOS version + rhcosVersion = "feature.node.kubernetes.io/system-os_release.OSTREE_VERSION" +) + +var _ = g.Describe("[sig-node] [Suite:openshift/dra-gpu-validation] [Feature:DynamicResourceAllocation]", g.Ordered, func() { + defer g.GinkgoRecover() + + // whether drivers/operators should be uninstalled after the test suite is done + var removeDriver bool + + // clients used by the setup code inside BeforeAll + var ( + config *rest.Config + clientset *kubernetes.Clientset + dynamic *clientgodynamic.DynamicClient + ) + + t := g.GinkgoTB() + // this framework object used by setup code only + // TODO: can we avoid BeforeEach, and AfterEach being invoked + setup := framework.NewDefaultFramework("dra-setup") + setup.SkipNamespaceCreation = true + oc := exutil.NewCLIWithFramework(setup) + + g.BeforeAll(func(ctx context.Context) { + var err error + config, err = clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG")) + framework.ExpectNoError(err) + clientset, err = kubernetes.NewForConfig(config) + framework.ExpectNoError(err) + dynamic, err = clientgodynamic.NewForConfig(clientgodynamic.ConfigFor(config)) + framework.ExpectNoError(err) + + // TODO: the framework fills these when BeforeEach is invoked + // we are using this framework object in setup code + setup.ClientSet = clientset + setup.DynamicClient = dynamic + }) + + g.Context("[Driver:dra-example-driver]", func() { + var ( + example *draexample.ExampleDRADriver + // the node on which the test pod will run + node *corev1.Node + ) + + // setup cert-manager, it's a dependency if you enable + // webook for the example dra driver + g.BeforeAll(func(ctx context.Context) { + namespace := "cert-manager" + cm := helper.NewHelmInstaller(g.GinkgoTB(), helper.HelmParameters{ + Namespace: namespace, + CreateNamespace: true, + Wait: true, + ChartURL: "oci://quay.io/jetstack/charts/cert-manager", + ReleaseName: "cert-manager", + ChartVersion: "v1.17.2", + Values: map[string]any{ + "crds": map[string]any{ + "enabled": true, + }, + }, + }) + g.By("installing cert-manager") + o.Expect(cm.Install(ctx)).To(o.Succeed(), "cert-manager install should not fail") + + if removeDriver { + 
g.DeferCleanup(func(ctx context.Context) { + + g.By("cleaning up cert-manager") + o.Expect(cm.Remove(ctx)).ToNot(o.HaveOccurred(), "cert-manager cleanup should not fail") + }) + } + }) + + // pick a worker node onto which the DRA driver will be + // installed, and the test pod(s) will run. + g.BeforeAll(func(ctx context.Context) { + client := clientset.CoreV1().Nodes() + result, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=", "node-role.kubernetes.io/worker"), + }) + o.Expect(err).Should(o.BeNil()) + o.Expect(len(result.Items)).To(o.BeNumerically(">=", 1)) + + // choose the first worker node + node = &result.Items[0] + + err = helper.EnsureNodeLabel(ctx, clientset, node.Name, exampleNodeLabel, "") + o.Expect(err).Should(o.BeNil()) + t.Logf("node=%s has been selected, the test will be exercised on this node", node.Name) + }) + + // setup the example DRA driver + g.BeforeAll(func(ctx context.Context) { + namespace := "dra-example-driver" + o.Expect(helper.UsePrivilegedSCC(ctx, clientset, "dra-example-driver-service-account", namespace)).To(o.BeNil()) + example = draexample.NewExampleDRADriver(g.GinkgoTB(), clientset, helper.HelmParameters{ + Namespace: namespace, + CreateNamespace: true, + ChartURL: "oci://registry.k8s.io/dra-example-driver/charts/dra-example-driver", + ReleaseName: "dra-example-driver", + ChartVersion: "0.2.0", + Values: map[string]any{ + "webhook": map[string]any{ + "enabled": false, + }, + // the plugin should run on the worker node we selected + "kubeletPlugin": map[string]any{ + "nodeSelector": map[string]any{ + exampleNodeLabel: "", + }, + }, + }, + }) + + g.By("installing dra-example-driver") + o.Expect(example.Setup(ctx)).To(o.Succeed(), "dra-example-driver deployment should not fail") + + if removeDriver { + g.DeferCleanup(func(ctx context.Context) { + g.By("cleaning up dra-example-driver") + o.Expect(example.Cleanup(ctx)).ToNot(o.HaveOccurred(), "dra-example-driver cleanup should not fail") + }) + } + + g.By("waiting for dra-example-driver to be ready") + o.Eventually(ctx, example.Ready).WithPolling(2*time.Second).Should(o.BeNil(), "dra-example-driver should be ready") + }) + + // initialize the framework object before any of our own BeforeEach func + var f *framework.Framework = framework.NewDefaultFramework("dra-common") + + g.BeforeEach(func(ctx context.Context) { + g.By(fmt.Sprintf("waiting for the driver: %s to advertise its resources", example.Class())) + dc, slices := example.EventuallyPublishResources(ctx, node) + t.Logf("the driver has published deviceclasses: %s", framework.PrettyPrintJSON(dc)) + t.Logf("the driver has published resourceslices: %s", framework.PrettyPrintJSON(slices)) + }) + + g.It("one pod, one container, asking for 1 distinct GPU", func(ctx context.Context) { + devices, err := example.ListPublishedDevices(ctx, node) + o.Expect(err).Should(o.BeNil()) + t.Logf("collected advertised devices from the resourceslices: %v", devices) + o.Expect(len(devices)).To(o.BeNumerically(">=", 1)) + + common := commonSpec{ + f: f, + class: example.Class(), + node: node, + deviceNamesAdvertised: devices, + newContainer: func(name string) corev1.Container { + return corev1.Container{ + Name: name, + Image: e2epodutil.GetDefaultTestImage(), + Command: e2epodutil.GenerateScriptCmd("env && sleep 100000"), + SecurityContext: e2epodutil.GetRestrictedContainerSecurityContext(), + } + }, + } + common.Test(ctx, g.GinkgoTB()) + }) + }) + + // Nvidia GPU driver setup and tests follow + g.Context("[Driver:dra-nvidia-driver]", func() { + 
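// the nvidia stack is assembled in dependency order below: NFD labels the + // gpu node, the gpu operator installs the gpu driver and toolkit, and the + // standalone dra driver publishes the ResourceSlices these tests consume. +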
// the gpu worker node we select where the test pods will run + var node *corev1.Node + + // setup node-feature-discovery + g.BeforeAll(func(ctx context.Context) { + namespace := "node-feature-discovery" + // the service account associated with the nfd worker should use the privileged SCC + o.Expect(helper.UsePrivilegedSCC(ctx, clientset, "node-feature-discovery-worker", namespace)).To(o.BeNil()) + + nfd := helper.NewHelmInstaller(g.GinkgoTB(), helper.HelmParameters{ + Namespace: namespace, + CreateNamespace: true, + ChartURL: "https://github.com/kubernetes-sigs/node-feature-discovery/releases/download/v0.17.3/node-feature-discovery-chart-0.17.3.tgz", + Wait: true, + ReleaseName: "node-feature-discovery", + ChartVersion: "v0.17.3", + Values: nvidia.DefaultNFDHelmValues(), + }) + g.By("installing node-feature-discovery") + o.Expect(nfd.Install(ctx)).To(o.Succeed(), "node-feature-discovery install should not fail") + + if removeDriver { + g.DeferCleanup(func(ctx context.Context) { + g.By("cleaning up node-feature-discovery") + o.Expect(nfd.Remove(ctx)).ToNot(o.HaveOccurred(), "node-feature-discovery cleanup should not fail") + }) + } + + g.By("waiting for nfd to label the gpu worker node") + o.Eventually(ctx, func(ctx context.Context) error { + result, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=true", nvidiaGPU), + }) + if err != nil || len(result.Items) == 0 { + return fmt.Errorf("still waiting for node labels to show up: %w", err) + } + // choose the first gpu worker node + node = &result.Items[0] + + err = helper.EnsureNodeLabel(ctx, clientset, node.Name, gpuNodeLabel, "") + o.Expect(err).Should(o.BeNil()) + t.Logf("node=%s has been selected, the test will be exercised on this node", node.Name) + return nil + }).WithPolling(time.Second).Should(o.BeNil(), "timeout while waiting for the expected node label to show up") + + t.Logf("node=%s has been selected, test will be exercised on this node", node.Name) + t.Logf("labels=%s", framework.PrettyPrintJSON(node.Labels)) + }) + + // we build the nvidia gpu driver using the driver toolkit, let's + // ensure that the driver toolkit is installed properly + g.BeforeAll(func(ctx context.Context) { + dtk := nvidia.NewDriverToolkitProber(oc, clientset) + osrelease, err := dtk.GetOSReleaseInfo(t, node) + o.Expect(err).Should(o.BeNil()) + o.Expect(osrelease.RHCOSVersion).NotTo(o.BeEmpty()) + + t.Logf("node=%s os-release: %+v", node.Name, osrelease) + // TODO: nfd does not always label the node with the right RHCOS version + // let's make sure, the selected gpu node has the right RHCOS + // version, otherwise the nvidia gpu operator will fail to + // install the gpu driver + if _, ok := node.Labels[rhcosVersion]; !ok { + g.By(fmt.Sprintf("adding label: %q to node: %s", rhcosVersion, node.Name)) + err := helper.EnsureNodeLabel(ctx, clientset, node.Name, rhcosVersion, osrelease.RHCOSVersion) + o.Expect(err).Should(o.BeNil()) + } + }) + + var operator *nvidia.GpuOperator + // setup nvidia gpu operator + g.BeforeAll(func(ctx context.Context) { + parameters := helper.HelmParameters{ + Namespace: "nvidia-gpu-operator", + CreateNamespace: true, + ChartURL: "https://helm.ngc.nvidia.com/nvidia/charts/gpu-operator-v25.3.2.tgz", + ReleaseName: "gpu-operator", + ChartVersion: "v25.3.2", + Wait: true, + Values: nvidia.DefaultGPUOperatorHelmValues(), + } + operator = nvidia.NewGPUOperatorInstaller(g.GinkgoTB(), clientset, setup, parameters) + g.By("installing nvidia-gpu-operator") + 
o.Expect(operator.Install(ctx)).To(o.Succeed(), "nvidia-gpu-operator install should not fail") + + g.By("waiting for nvidia-gpu-operator to be ready") + o.Expect(operator.Ready(ctx, node)).To(o.Succeed(), "nvidia-gpu-operator should be ready") + + if removeDriver { + g.DeferCleanup(func(ctx context.Context) { + g.By("cleaning up nvidia-gpu-operator") + o.Expect(operator.Cleanup(ctx)).ToNot(o.HaveOccurred(), "nvidia-gpu-operator cleanup should not fail") + }) + } + }) + + // setup nvidia-dra-driver-gpu + // TODO: the DRA driver is not included in the gpu operator yet + var driver *nvidia.NvidiaDRADriverGPU + g.BeforeAll(func(ctx context.Context) { + namespace := "nvidia-dra-driver-gpu" + // TODO: mps-control-daemon uses the default service account in this namespace + // and it needs to use the privileged SCC + o.Expect(helper.UsePrivilegedSCC(ctx, clientset, "default", namespace)).To(o.BeNil()) + + driver = nvidia.NewNvidiaDRADriverGPU(g.GinkgoTB(), clientset, helper.HelmParameters{ + Namespace: namespace, + CreateNamespace: true, + ChartURL: "https://helm.ngc.nvidia.com/nvidia/charts/nvidia-dra-driver-gpu-25.3.1.tgz", + ReleaseName: "nvidia-dra-driver-gpu", + ChartVersion: "25.3.1", + Wait: true, + Values: nvidia.DefaultDRADriverHelmValues(), + }) + + g.By("installing nvidia-dra-driver-gpu") + o.Expect(driver.Setup(ctx)).To(o.Succeed(), "nvidia-dra-driver-gpu deployment should not fail") + + if removeDriver { + g.DeferCleanup(func(ctx context.Context) { + g.By("cleaning up nvidia-dra-driver-gpu") + o.Expect(driver.Cleanup(ctx)).ToNot(o.HaveOccurred(), "nvidia-dra-driver-gpu cleanup should not fail") + }) + } + + g.By("waiting for nvidia-dra-driver-gpu to be ready") + o.Expect(driver.Ready(ctx, node)).To(o.Succeed(), "nvidia-dra-driver-gpu should be ready") + }) + + g.BeforeAll(func(ctx context.Context) { + // TODO: nvidia gpu operator checks if the following + // label is present on the node as a precondition, before + // it installs mig-manager on the gpu node, or other + // components + product := "nvidia.com/gpu.product" + if p, ok := node.Labels[product]; !ok || p == "" { + t.Logf("node: %s label: %s is not set", node.Name, product) + + g.By(fmt.Sprintf("discovering gpu product on node: %s", node.Name)) + gpuProductName, err := operator.DiscoverGPUProudct(ctx, node) + o.Expect(err).Should(o.BeNil()) + o.Expect(gpuProductName).ShouldNot(o.BeEmpty()) + t.Logf("node: %s gpu product name: %s", node.Name, gpuProductName) + + err = helper.EnsureNodeLabel(ctx, clientset, node.Name, product, gpuProductName) + o.Expect(err).Should(o.BeNil()) + } + }) + + // initialize the framework object before any of our BeforeEach func + var f *framework.Framework = framework.NewDefaultFramework("dra-nvidia") + + g.BeforeEach(func(ctx context.Context) { + g.By(fmt.Sprintf("waiting for the driver: %s to advertise its resources", driver.Class())) + dc, slices := driver.EventuallyPublishResources(ctx, node) + t.Logf("the driver has published deviceclasses: %s", framework.PrettyPrintJSON(dc)) + t.Logf("the driver has published resourceslices: %s", framework.PrettyPrintJSON(slices)) + }) + + g.It("one pod, one container, asking for 1 distinct GPU", func(ctx context.Context) { + advertised, err := driver.ListPublishedDevicesFromResourceSlice(ctx, node) + o.Expect(err).Should(o.BeNil()) + t.Logf("collected advertised devices from the resourceslices: %v", advertised) + + // we want the pod to use a whole gpu + gpus := advertised.FilterBy(func(gpu nvidia.NvidiaGPU) bool { return gpu.Type == "gpu" }).Names() + 
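// the dra driver advertises whole gpus (Type == "gpu") and, once MIG is + // configured, MIG slices (Type == "mig"); each test filters for the kind + // of device it needs. +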
t.Logf("the following whole gpus are available to be used: %v", gpus) + o.Expect(len(gpus)).To(o.BeNumerically(">=", 1)) + + common := commonSpec{ + f: f, + class: driver.Class(), + node: node, + deviceNamesAdvertised: gpus, + newContainer: func(name string) corev1.Container { + return corev1.Container{ + Name: name, + Image: "ubuntu:22.04", + Command: []string{"bash", "-c"}, + Args: []string{"nvidia-smi -L; trap 'exit 0' TERM; sleep 9999 & wait"}, + } + }, + } + common.Test(ctx, g.GinkgoTB()) + }) + + g.It("two pods, one container each, asking for 1 distinct GPU", func(ctx context.Context) { + advertised, err := driver.ListPublishedDevicesFromResourceSlice(ctx, node) + o.Expect(err).Should(o.BeNil()) + t.Logf("collected advertised devices from the resourceslices: %v", advertised) + + gpus := advertised.FilterBy(func(gpu nvidia.NvidiaGPU) bool { return gpu.Type == "gpu" }) + t.Logf("the following whole gpus are available to be used: %v", gpus) + o.Expect(len(gpus)).To(o.BeNumerically(">=", 2), "need at least two whole gpus for this test") + + spec := distinctGPUsSpec{ + f: f, + class: driver.Class(), + node: node, + gpu0: gpus[0].UUID, + gpu1: gpus[1].UUID, + } + spec.Test(ctx, g.GinkgoTB()) + }) + + g.It("two pods, share data using CUDA IPC", func(ctx context.Context) { + advertised, err := driver.ListPublishedDevicesFromResourceSlice(ctx, node) + o.Expect(err).Should(o.BeNil()) + t.Logf("collected advertised devices from the resourceslices: %v", advertised) + + gpus := advertised.FilterBy(func(gpu nvidia.NvidiaGPU) bool { return gpu.Type == "gpu" }) + t.Logf("the following whole gpus are available to be used: %v", gpus) + o.Expect(len(gpus)).To(o.BeNumerically(">=", 2), "need at least two whole gpus for this test") + + cudaIPC := shareDataWithCUDAIPC{ + f: f, + class: driver.Class(), + node: node, + producer: gpus[1].UUID, + consumer: gpus[0].UUID, + } + cudaIPC.Test(ctx, g.GinkgoTB()) + }) + + g.Context("[TimeSlicing=true]", func() { + g.It("one pod, 2 containers, with time slice", func(ctx context.Context) { + timeSlicing := gpuTimeSlicingWithCUDASpec{ + f: f, + class: driver.Class(), + node: node, + dra: driver, + driver: operator, + } + timeSlicing.Test(ctx, g.GinkgoTB()) + }) + }) + + g.Context("[MPS=true]", func() { + g.It("one pod, 2 containers share GPU using MPS", func(ctx context.Context) { + mpsShared := mpsWithCUDASpec{ + f: f, + class: driver.Class(), + node: node, + dra: driver, + driver: operator, + } + mpsShared.Test(ctx, g.GinkgoTB()) + }) + }) + + g.Context("[TimeSlicing,MPS=true]", func() { + g.It("gpu sharing with both MPS and TimeSlicing", func(ctx context.Context) { + advertised, err := driver.ListPublishedDevicesFromResourceSlice(ctx, node) + o.Expect(err).Should(o.BeNil()) + t.Logf("collected advertised devices from the resourceslices: %v", advertised) + + gpus := advertised.FilterBy(func(gpu nvidia.NvidiaGPU) bool { return gpu.Type == "gpu" }) + t.Logf("the following whole gpus are available to be used: %v", gpus) + o.Expect(len(gpus)).To(o.BeNumerically(">=", 2), "need at least two whole gpus for this test") + + spec := gpuTimeSlicingAndMPSWithCUDASpec{ + f: f, + class: driver.Class(), + node: node, + driver: operator, + } + spec.Test(ctx, g.GinkgoTB()) + }) + }) + + g.Context("[MIGEnabled=true]", func() { + g.BeforeAll(func(ctx context.Context) { + // we will use a custom MIG configuration + config := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dra-e2e-mig-parted-config", + Namespace: operator.Namespace(), + }, + Data: map[string]string{ + 
"config.yaml": ` + version: v1 + mig-configs: + all-disabled: + - devices: all + mig-enabled: false + gpu-e2e: + - devices: [0] + mig-enabled: true + mig-devices: + "3g.20gb": 1 + "2g.10gb": 1 + "1g.5gb": 1 + - devices: [1, 2, 3, 4, 5, 6, 7] + mig-enabled: false +`}, + } + o.Expect(helper.EnsureConfigMap(ctx, clientset, &config)).Should(o.BeNil()) + + g.By("configuring nvidia-mig-manager") + patchBytes := ` +{ + "spec": { + "mig": { + "strategy": "mixed" + }, + "migManager": { + "config": { + "default": "all-disabled", + "name": "dra-e2e-mig-parted-config" + }, + "env": [ + { + "name": "WITH_REBOOT", + "value": "true" + } + ] + }, + "validator": { + "cuda": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + } + } +} +` + resource := dynamic.Resource(schema.GroupVersionResource{Group: "nvidia.com", Version: "v1", Resource: "clusterpolicies"}) + policy, err := resource.Patch(ctx, "cluster-policy", types.MergePatchType, []byte(patchBytes), metav1.PatchOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("gpu operator cluster policy: \n%s\n", framework.PrettyPrintJSON(policy)) + + g.By(fmt.Sprintf("waiting for nvidia-mig-manager to be ready")) + o.Expect(operator.MIGManagerReady(ctx, node)).Should(o.BeNil()) + }) + + g.It("one pod, three containers, asking for 3g.20gb, 2g.10gb, and 1g.5gb respectively", func(ctx context.Context) { + ascending := func(x, y string) bool { + return strings.Compare(x, y) < 0 // Ascending order + } + + // MIG devices we want to setup + want := []string{"3g.20gb", "2g.10gb", "1g.5gb"} + + // apply the desired MIG configuration + g.By(fmt.Sprintf("labeling node: %s for nvidia.com/mig.config: %s", node.Name, "gpu-e2e")) + err := helper.EnsureNodeLabel(ctx, clientset, node.Name, "nvidia.com/mig.config", "gpu-e2e") + o.Expect(err).Should(o.BeNil()) + + g.By("waiting for the gpu driver to advertise the expected MIG devices") + advertised := nvidia.NvidiaGPUs{} + o.Eventually(ctx, func(ctx context.Context) error { + got, err := operator.ListMIGDevicesUsingNvidiaSMI(ctx, node) + if err != nil { + return err + } + if !cmp.Equal(want, got.Names(), cmpopts.SortSlices(ascending)) { + return fmt.Errorf("still waiting for MIG devices to show up, want: %v, got: %v", want, got) + } + advertised = got + return nil + }).WithPolling(10*time.Second).Should(o.BeNil(), "timeout waiting for expected mig devices") + t.Logf("the gpu driver is advertising these mig devices %v", advertised) + + // TODO: the DRA driver does not pick up the MIG slices, restarting the plugin seems to do the trick + o.Expect(driver.RemovePluginFromNode(ctx, node)).Should(o.BeNil()) + g.By("waiting for nvidia-dra-driver-gpu to be ready") + o.Expect(driver.Ready(ctx, node)).To(o.Succeed(), "nvidia-dra-driver-gpu should be ready") + + g.By("waiting for the dra driver to advertise the mig devices in its resourceslices") + o.Eventually(ctx, func(ctx context.Context) error { + all, err := driver.ListPublishedDevicesFromResourceSlice(ctx, node) + if err != nil { + return err + } + migs := all.FilterBy(func(gpu nvidia.NvidiaGPU) bool { return gpu.Type == "mig" }) + + if want, got := advertised.UUIDs(), migs.UUIDs(); !cmp.Equal(want, got, cmpopts.SortSlices(ascending)) { + return fmt.Errorf("still waiting for the dra driver to publish the MIG devices, want: %v, got: %v", want, got) + } + t.Logf("the dra driver has published the mig devices in its resourceslices: %s", framework.PrettyPrintJSON(migs)) + return nil + }).WithPolling(time.Second).Should(o.BeNil(), "timeout while waiting for the dra 
driver to advertise its resources") + + mig := gpuMIGSpec{ + f: f, + class: driver.Class(), + node: node, + uuids: advertised.UUIDs(), + devices: advertised.Names(), + } + mig.Test(ctx, g.GinkgoTB()) + }) + }) + }) +}) diff --git a/test/extended/dra/example/dra-example-driver.go b/test/extended/dra/example/dra-example-driver.go new file mode 100644 index 000000000000..babe726ce374 --- /dev/null +++ b/test/extended/dra/example/dra-example-driver.go @@ -0,0 +1,95 @@ +package example + +import ( + "context" + "fmt" + "testing" + "time" + + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + helper "github.com/openshift/origin/test/extended/dra/helper" +) + +func NewExampleDRADriver(t testing.TB, clientset kubernetes.Interface, p helper.HelmParameters) *ExampleDRADriver { + return &ExampleDRADriver{ + t: t, + name: "gpu.example.com", + clientset: clientset, + helm: helper.NewHelmInstaller(t, p), + namespace: p.Namespace, + } +} + +type ExampleDRADriver struct { + t testing.TB + name string + helm *helper.HelmInstaller + clientset kubernetes.Interface + namespace string +} + +func (d *ExampleDRADriver) Class() string { return d.name } +func (d *ExampleDRADriver) Setup(ctx context.Context) error { return d.helm.Install(ctx) } +func (d *ExampleDRADriver) Cleanup(ctx context.Context) error { return d.helm.Remove(ctx) } + +func (d ExampleDRADriver) Ready(ctx context.Context) error { + client := d.clientset.AppsV1() + ds, err := client.DaemonSets(d.namespace).Get(ctx, "dra-example-driver-kubeletplugin", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get dra-example-driver-kubeletplugin: %w", err) + } + + ready, scheduled := ds.Status.NumberReady, ds.Status.DesiredNumberScheduled + if ready != scheduled { + return fmt.Errorf("dra-example-driver-kubeletplugin is not ready, DesiredNumberScheduled: %d, NumberReady: %d", scheduled, ready) + } + return nil +} + +func (d *ExampleDRADriver) EventuallyPublishResources(ctx context.Context, node *corev1.Node) (dc *resourceapi.DeviceClass, slices []resourceapi.ResourceSlice) { + class := d.name + o.Eventually(ctx, func(ctx context.Context) error { + // has the driver published the device class? 
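+ // DeviceClasses are cluster-scoped and, for this driver, named after it; + // ResourceSlices are filtered server-side by driver and node via field + // selectors that presumably expand to something like + // "spec.driver=gpu.example.com,spec.nodeName=<node>" (the exact keys come + // from the resource.k8s.io API constants used below).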
+ obj, err := d.clientset.ResourceV1().DeviceClasses().Get(ctx, class, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("still waiting for the driver to advertise its DeviceClass - %w", err) + } + + result, err := d.clientset.ResourceV1().ResourceSlices().List(ctx, metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + class + "," + + resourceapi.ResourceSliceSelectorNodeName + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return fmt.Errorf("still waiting for the driver to advertise its ResourceSlice - %w", err) + } + dc = obj + slices = result.Items + return nil + }).WithPolling(2*time.Second).Should(o.BeNil(), "timeout while waiting for the driver to advertise its resources") + + return dc, slices +} + +func (d *ExampleDRADriver) ListPublishedDevices(ctx context.Context, node *corev1.Node) ([]string, error) { + result, err := d.clientset.ResourceV1().ResourceSlices().List(ctx, metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.name + "," + + resourceapi.ResourceSliceSelectorNodeName + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return nil, fmt.Errorf("the driver has not advertised its ResourceSlice yet - %w", err) + } + + devices := []string{} + for _, rs := range result.Items { + for _, device := range rs.Spec.Devices { + devices = append(devices, device.Name) + } + } + return devices, nil +} diff --git a/test/extended/dra/helper/helm-install.go b/test/extended/dra/helper/helm-install.go new file mode 100644 index 000000000000..c898b5bd1585 --- /dev/null +++ b/test/extended/dra/helper/helm-install.go @@ -0,0 +1,116 @@ +package helper + +import ( + "context" + "fmt" + "os" + "time" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/registry" +) + +func NewHelmInstaller(logger logger, p HelmParameters) *HelmInstaller { + return &HelmInstaller{HelmParameters: p, logger: logger} +} + +type HelmParameters struct { + Namespace string + CreateNamespace bool + ChartURL string + ReleaseName string + ChartVersion string + Values map[string]interface{} + Wait bool +} + +type logger interface { + Log(args ...any) + Logf(format string, args ...any) +} + +type HelmInstaller struct { + HelmParameters + logger logger + + // save it for uninstall + config *action.Configuration +} + +func (h *HelmInstaller) Install(ctx context.Context) error { + os.Setenv("HELM_EXPERIMENTAL_OCI", "1") + os.Setenv("HELM_DEBUG", "false") + + // TODO: nvidia gpu operator does not install to the designated namespace + // this is a workaround for now + oldNS := os.Getenv("HELM_NAMESPACE") + os.Setenv("HELM_NAMESPACE", h.Namespace) + defer os.Setenv("HELM_NAMESPACE", oldNS) + + settings := cli.New() + + registryClient, err := registry.NewClient( + registry.ClientOptDebug(settings.Debug), + registry.ClientOptWriter(os.Stdout), + registry.ClientOptCredentialsFile(settings.RegistryConfig), + ) + if err != nil { + return fmt.Errorf("failed to create registry client: %w", err) + } + + h.config = &action.Configuration{} + h.config.RegistryClient = registryClient + if err := h.config.Init(settings.RESTClientGetter(), h.Namespace, os.Getenv("HELM_DRIVER"), h.logger.Logf); err != nil { + return fmt.Errorf("failed to initialize Helm action configuration: %w", err) + } + + install := action.NewInstall(h.config) + install.ChartPathOptions.Version = h.ChartVersion + localChartPath, err := install.ChartPathOptions.LocateChart(h.ChartURL, settings) + if 
err != nil { + return fmt.Errorf("failed to locate chart: %w", err) + } + h.logger.Logf("Downloaded chart to: %s", localChartPath) + + chart, err := loader.Load(localChartPath) + if err != nil { + return fmt.Errorf("failed to load chart: %w", err) + } + h.logger.Logf("loaded chart: %s", h.ReleaseName) + + history := action.NewHistory(h.config) + history.Max = 1 + if releases, err := history.Run(h.ReleaseName); err != nil || len(releases) <= 0 { + client := action.NewInstall(h.config) + client.Namespace = h.Namespace + client.ReleaseName = h.ReleaseName + client.CreateNamespace = h.CreateNamespace + client.Wait = h.Wait + // TODO: should be bound to the ctx + client.Timeout = 5 * time.Minute + if _, err := client.Run(chart, h.Values); err != nil { + return fmt.Errorf("failed to install chart: %w", err) + } + return nil + } + + client := action.NewUpgrade(h.config) + client.Namespace = h.Namespace + client.Wait = true + client.Timeout = 5 * time.Minute + if _, err := client.Run(h.ReleaseName, chart, h.Values); err != nil { + return fmt.Errorf("failed to upgrade chart: %w", err) + } + return nil +} + +func (h HelmInstaller) Remove(ctx context.Context) error { + client := action.NewUninstall(h.config) + client.KeepHistory = false + if _, err := client.Run(h.ReleaseName); err != nil { + return fmt.Errorf("failed to uninstall %s: %w", h.ReleaseName, err) + } + return nil +} diff --git a/test/extended/dra/helper/helper.go b/test/extended/dra/helper/helper.go new file mode 100644 index 000000000000..270e7f3bfe46 --- /dev/null +++ b/test/extended/dra/helper/helper.go @@ -0,0 +1,213 @@ +package helper + +import ( + "bufio" + "context" + "fmt" + "io" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + resourceapi "k8s.io/api/resource/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + testutils "k8s.io/kubernetes/test/utils" + + "k8s.io/utils/ptr" +) + +func NewPod(namespace, name string) *corev1.Pod { + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: make(map[string]string), + Annotations: make(map[string]string), + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + TerminationGracePeriodSeconds: ptr.To[int64](1), + SecurityContext: e2epodutil.GetRestrictedPodSecurityContext(), + }, + } + return pod +} + +func UsePrivilegedSCC(ctx context.Context, clientset *kubernetes.Clientset, sa, namespace string) error { + name := fmt.Sprintf("%s-use-scc-privileged", sa) + want := rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "system:openshift:scc:privileged", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa, + Namespace: namespace, + }, + }, + } + _, err := clientset.RbacV1().ClusterRoleBindings().Create(ctx, &want, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +func EnsureNodeLabel(ctx context.Context, clientset kubernetes.Interface, node string, key, want string) error { + client := clientset.CoreV1().Nodes() + current, err := client.Get(ctx, node, metav1.GetOptions{}) + if err != nil { + return err + } + if value, ok := current.Labels[key]; ok && 
value == want { + return nil + } + + if len(current.Labels) == 0 { + current.Labels = map[string]string{} + } + current.Labels[key] = want + _, err = client.Update(ctx, current, metav1.UpdateOptions{}) + return err +} + +func EnsureConfigMap(ctx context.Context, clientset kubernetes.Interface, want *corev1.ConfigMap) error { + client := clientset.CoreV1().ConfigMaps(want.Namespace) + _, err := client.Create(ctx, want, metav1.CreateOptions{}) + if err != nil { + if apierrors.IsAlreadyExists(err) { + return nil + } + return err + } + return nil +} + +func GetLogs(ctx context.Context, clientset kubernetes.Interface, namespace, name, ctr string) (string, error) { + client := clientset.CoreV1().Pods(namespace) + options := corev1.PodLogOptions{Container: ctr} + r, err := client.GetLogs(name, &options).Stream(ctx) + if err != nil { + return "", fmt.Errorf("failed to get pod logs %s: %w", name, err) + } + out, err := io.ReadAll(r) + if err != nil { + return "", fmt.Errorf("failed to read pod logs %s: %w", name, err) + } + return string(out), nil +} + +func GetResourceClaimFor(ctx context.Context, clientset kubernetes.Interface, pod *corev1.Pod) (*resourceapi.ResourceClaim, error) { + result, err := clientset.ResourceV1().ResourceClaims(pod.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for i := range result.Items { + claim := &result.Items[i] + for _, owner := range claim.OwnerReferences { + if owner.Name == pod.Name && owner.UID == pod.UID { + return claim, nil + } + } + } + return nil, nil +} + +func PodRunningReady(ctx context.Context, t testing.TB, clientset kubernetes.Interface, component, namespace string, options metav1.ListOptions) error { + client := clientset.CoreV1().Pods(namespace) + result, err := client.List(ctx, options) + if err != nil || len(result.Items) == 0 { + return fmt.Errorf("[%s] still waiting for pod to show up - %w", component, err) + } + + for _, pod := range result.Items { + ready, err := testutils.PodRunningReady(&pod) + if err != nil || !ready { + err := fmt.Errorf("[%s] still waiting for pod: %s to be ready: %v", component, pod.Name, err) + t.Log(err.Error()) + return err + } + t.Logf("[%s] pod: %s ready", component, pod.Name) + } + return nil +} + +func GetAllocatedDeviceForRequest(request string, claim *resourceapi.ResourceClaim) (string, string, error) { + // the request must exist in the spec + found := false + for _, dr := range claim.Spec.Devices.Requests { + if dr.Name == request { + found = true + } + } + if !found { + return "", "", fmt.Errorf("the request does not exist in the claim") + } + + allocation := claim.Status.Allocation + if allocation == nil { + return "", "", fmt.Errorf("the given claim has not been allocated yet") + } + + for _, r := range allocation.Devices.Results { + if r.Request == request { + return r.Device, r.Pool, nil + } + } + return "", "", nil +} + +func ExecIntoContainer(ctx context.Context, t testing.TB, f *framework.Framework, name, namespace, container string, cmd []string) ([]string, error) { + t.Logf("exec into pod: %s, container: %s, command: %v", name, container, cmd) + stdout, stderr, err := e2epodutil.ExecWithOptionsContext(ctx, f, e2epodutil.ExecOptions{ + Command: cmd, + Namespace: namespace, + PodName: name, + ContainerName: container, + CaptureStdout: true, + CaptureStderr: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to run command %v on pod %s, stdout: %v, stderr: %v, err: %w", cmd, name, stdout, stderr, err) + } + t.Logf("output
of pod exec: %s/%s (container=%s):\n\n%s\n%s", namespace, name, container, stdout, stderr) + lines := []string{} + sc := bufio.NewScanner(strings.NewReader(stdout)) + for sc.Scan() { + lines = append(lines, sc.Text()) + } + return lines, nil +} + +func EnsureNamespaceLabel(ctx context.Context, clientset kubernetes.Interface, ns string, key, want string) error { + current, err := clientset.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) + if err != nil { + return err + } + if value, ok := current.Labels[key]; ok && value == want { + return nil + } + + if len(current.Labels) == 0 { + current.Labels = map[string]string{} + } + current.Labels[key] = want + _, err = clientset.CoreV1().Namespaces().Update(ctx, current, metav1.UpdateOptions{}) + return err +} diff --git a/test/extended/dra/mig_spec.go b/test/extended/dra/mig_spec.go new file mode 100644 index 000000000000..00cff0f8b1eb --- /dev/null +++ b/test/extended/dra/mig_spec.go @@ -0,0 +1,140 @@ +package dra + +import ( + "context" + "fmt" + "strings" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// exercises a use case with static MIG devices +// reference: +// - https://docs.nvidia.com/datacenter/tesla/mig-user-guide/ +// - https://github.com/NVIDIA/mig-parted +type gpuMIGSpec struct { + f *framework.Framework + class string + // the node onto which the pod is expected to run + node *corev1.Node + // MIG device profiles to allocate; the same profile can appear more than once + devices []string + // UUIDs of the MIG devices advertised by the gpu driver + uuids []string +} + +// One pod, N containers, each asking for a MIG device on a shared mig-enabled GPU +func (spec gpuMIGSpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + clientset := spec.f.ClientSet + + // create a resource claim template that contains a request for each mig device + template := &resourceapi.ResourceClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "mig-devices", + }, + } + for i, device := range spec.devices { + name := fmt.Sprintf("%s-%d", strings.ReplaceAll(device, ".", "-"), i) + template.Spec.Spec.Devices.Requests = append(template.Spec.Spec.Devices.Requests, resourceapi.DeviceRequest{ + Name: name, + Exactly: &resourceapi.ExactDeviceRequest{ + DeviceClassName: "mig.nvidia.com", + Selectors: []resourceapi.DeviceSelector{ + { + CEL: &resourceapi.CELDeviceSelector{ + Expression: "device.attributes['" + spec.class + "'].profile == '" + device + "'", + }, + }, + }, + }, + }) + } + template.Spec.Spec.Devices.Constraints = []resourceapi.DeviceConstraint{ + { + MatchAttribute: ptr.To(resourceapi.FullyQualifiedName(spec.class + "/parentUUID")), + }, + } + + // one pod, N container(s), each wants a MIG device + pod := helper.NewPod(namespace, "pod") + for i, request := range template.Spec.Spec.Devices.Requests { + ctr := corev1.Container{ + Name: fmt.Sprintf("ctr%d", i), + Image: "ubuntu:22.04", + Command: []string{"bash", "-c"}, + Args: []string{"nvidia-smi -L; trap 'exit 0' TERM; sleep 9999 & wait"}, + } + ctr.Resources.Claims =
[]corev1.ResourceClaim{{Name: "mig-devices", Request: request.Name}} + pod.Spec.Containers = append(pod.Spec.Containers, ctr) + } + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "mig-devices", + ResourceClaimTemplateName: ptr.To(template.Name), + }, + } + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + + g.By("creating external claim and pod") + _, err := clientset.ResourceV1().ResourceClaimTemplates(namespace).Create(ctx, template, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + pod, err = clientset.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + t.Logf("pod in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pod)) + + result, err := clientset.ResourceV1().ResourceClaims(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(result)) + }) + + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, spec.f.ClientSet, pod) + o.Expect(err).To(o.BeNil()) + + // the pod should run on the expected node + pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := helper.GetResourceClaimFor(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + + o.Expect(claim.Status.Allocation).NotTo(o.BeNil()) + o.Expect(len(claim.Status.Allocation.Devices.Results)).To(o.Equal(len(spec.devices))) + + migUsed := nvidia.NvidiaGPUs{} + for _, ctr := range pod.Spec.Containers { + g.By(fmt.Sprintf("running nvidia-smi command into pod %s/%s container: %s", pod.Namespace, pod.Name, ctr.Name)) + lines, err := helper.ExecIntoContainer(ctx, t, spec.f, pod.Name, pod.Namespace, ctr.Name, + []string{"nvidia-smi", "-L"}) + o.Expect(err).To(o.BeNil()) + got := nvidia.ExtractMIGDeviceInfoFromNvidiaSMILines(lines) + migUsed = append(migUsed, got...) 
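+ // collect the MIG devices reported by each container; the combined set is asserted against the expected UUIDs below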
+ } + o.Expect(migUsed.UUIDs()).To(o.Equal(spec.uuids)) +} diff --git a/test/extended/dra/mps_spec.go b/test/extended/dra/mps_spec.go new file mode 100644 index 000000000000..986a2d2c393a --- /dev/null +++ b/test/extended/dra/mps_spec.go @@ -0,0 +1,209 @@ +package dra + +import ( + "context" + "fmt" + "strings" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// references: +// - https://github.com/NVIDIA/k8s-dra-driver-gpu/blob/main/demo/specs/quickstart/gpu-test-mps.yaml +// - https://docs.nvidia.com/deploy/mps/index.html +// +// MPS with CUDA +// One pod, 2 containers share GPU using MPS +type mpsWithCUDASpec struct { + f *framework.Framework + class string + // the node onto which the pod is expected to run + node *corev1.Node + dra *nvidia.NvidiaDRADriverGPU + driver *nvidia.GpuOperator +} + +func (spec mpsWithCUDASpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + clientset := spec.f.ClientSet + + // MPS strategy + const ( + mpsStrategy = `{ + "apiVersion": "resource.nvidia.com/v1beta1", + "kind": "GpuConfig", + "sharing": { + "strategy": "MPS", + "mpsConfig": { + "defaultActiveThreadPercentage": 50, + "defaultPinnedDeviceMemoryLimit": "10Gi" + } + } +} +` + ) + template := &resourceapi.ResourceClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "shared-gpu", + }, + } + template.Spec.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + { + Name: "mps-gpu", + Exactly: &resourceapi.ExactDeviceRequest{DeviceClassName: spec.class}, + }, + } + template.Spec.Spec.Devices.Config = []resourceapi.DeviceClaimConfiguration{ + { + Requests: []string{"mps-gpu"}, + DeviceConfiguration: resourceapi.DeviceConfiguration{ + Opaque: &resourceapi.OpaqueDeviceConfiguration{ + Driver: spec.class, + Parameters: runtime.RawExtension{Raw: []byte(mpsStrategy)}, + }, + }, + }, + } + + newContainer := func(name string) corev1.Container { + ctr := corev1.Container{ + Name: name, + Image: "nvcr.io/nvidia/k8s/cuda-sample:nbody-cuda11.6.0-ubuntu18.04", + Command: []string{"bash", "-c"}, + Args: []string{"trap 'exit 0' TERM; /tmp/sample --benchmark --numbodies=4226048 & wait"}, + } + ctr.Resources.Claims = []corev1.ResourceClaim{{Name: "shared-gpu", Request: "mps-gpu"}} + return ctr + } + // one pod, two containers, both containers share a single gpu + pod := helper.NewPod(namespace, "pod") + pod.Spec.Containers = []corev1.Container{ + newContainer("mps-ctr0"), + newContainer("mps-ctr1"), + } + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "shared-gpu", + ResourceClaimTemplateName: ptr.To(template.Name), + }, + } + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + + g.By("creating external claim and pod") + t.Logf("creating resource template: \n%s\n", framework.PrettyPrintJSON(template)) + _, err := clientset.ResourceV1().ResourceClaimTemplates(namespace).Create(ctx, template, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) 
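+ // the claim template is instantiated into a per-pod ResourceClaim once the pod is scheduled; + // both containers reference the same "mps-gpu" request, so they share a single GPU through MPS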
+ + t.Logf("creating pod: \n%s\n", framework.PrettyPrintJSON(pod)) + pod, err = clientset.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + t.Logf("pod in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pod)) + + result, err := clientset.ResourceV1().ResourceClaims(namespace).List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(result)) + }) + + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + + // the pod should run on the expected node + pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := helper.GetResourceClaimFor(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + + device, pool, err := helper.GetAllocatedDeviceForRequest("mps-gpu", claim) + o.Expect(err).To(o.BeNil()) + o.Expect(device).ToNot(o.BeEmpty()) + o.Expect(pool).ToNot(o.BeEmpty()) + t.Logf("device %q has been allocated to claim: %q", device, claim.Name) + + // get the index of the gpu allocated to us + gpu, err := spec.dra.GetGPUFromResourceSlice(ctx, spec.node, device) + o.Expect(err).To(o.BeNil()) + o.Expect(gpu.UUID).ToNot(o.BeEmpty()) + o.Expect(gpu.Index).ToNot(o.BeEmpty()) + + // we expect at least three processes to be running + // a) two instances of "/tmp/sample" type: M+C + // b) one instance of "nvidia-cuda-mps-server" type: C + const ( + sample = "/tmp/sample" + server = "nvidia-cuda-mps-server" + ) + processes, err := spec.driver.QueryCompute(ctx, spec.node, gpu.Index) + o.Expect(err).To(o.BeNil()) + o.Expect(len(processes)).To(o.BeNumerically(">=", 3)) + + names := processes.FilterBy(func(p nvidia.NvidiaCompute) bool { return p.Name == sample || p.Name == server }).Names() + o.Expect(names).To(o.ConsistOf([]string{sample, sample, server})) + + // TODO: nvidia-smi --query-compute-apps does not recognize a key to + // list the process type, for now we run nvidia-smi + lines, err := spec.driver.RunNvidiSMI(ctx, spec.node) + o.Expect(err).To(o.BeNil()) + samples := []string{} + for _, line := range lines { + if strings.Contains(line, sample) { + samples = append(samples, line) + } + } + o.Expect(len(samples)).To(o.Equal(2)) + o.Expect(samples).Should(o.HaveExactElements(o.ContainSubstring("M+C"), o.ContainSubstring("M+C"))) + + g.By("capturing MPS control daemon logs") + mpsControlPod, err := spec.dra.GetMPSControlDaemonForClaim(ctx, spec.node, claim) + o.Expect(err).To(o.BeNil()) + o.Expect(mpsControlPod).ToNot(o.BeNil()) + t.Logf("MPS control daemon pod: %s", mpsControlPod.Name) + logs, err := helper.GetLogs(ctx, clientset, mpsControlPod.Namespace, mpsControlPod.Name, "mps-control-daemon") + o.Expect(err).To(o.BeNil()) + t.Logf("logs from MPS daemon control pod, container: %s\n%s\n", "mps-control-daemon", logs) + + // chroot /driver-root sh -c "echo get_default_active_thread_percentage | nvidia-cuda-mps-control" + lines, err = helper.ExecIntoContainer(ctx, t, spec.f, mpsControlPod.Name, mpsControlPod.Namespace, + "mps-control-daemon", + []string{"chroot", "/driver-root", "sh", "-c", "echo get_default_active_thread_percentage | 
nvidia-cuda-mps-control"}) + o.Expect(err).To(o.BeNil()) + o.Expect(len(lines)).To(o.Equal(1)) + o.Expect(lines[0]).To(o.Equal("50.0")) + + // TODO: memory limit does not show up as expected + _, err = helper.ExecIntoContainer(ctx, t, spec.f, pod.Name, pod.Namespace, + "mps-control-daemon", + []string{"chroot", "/driver-root", "sh", "-c", "echo get_default_device_pinned_mem_limit 0 | nvidia-cuda-mps-control"}) + + g.By(fmt.Sprintf("retrieving logs for pod %s/%s", pod.Namespace, pod.Name)) + for _, ctr := range pod.Spec.Containers { + logs, err := helper.GetLogs(ctx, spec.f.ClientSet, pod.Namespace, pod.Name, ctr.Name) + o.Expect(err).To(o.BeNil()) + t.Logf("logs from container: %s\n%s\n", ctr.Name, logs) + } +} diff --git a/test/extended/dra/nvidia/drivertoolkit.go b/test/extended/dra/nvidia/drivertoolkit.go new file mode 100644 index 000000000000..4061fab5f400 --- /dev/null +++ b/test/extended/dra/nvidia/drivertoolkit.go @@ -0,0 +1,56 @@ +package nvidia + +import ( + "bufio" + "fmt" + "strings" + "testing" + + g "github.com/onsi/ginkgo/v2" + + corev1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + + exutil "github.com/openshift/origin/test/extended/util" +) + +func NewDriverToolkitProber(oc *exutil.CLI, clientset clientset.Interface) *DriverToolkit { + return &DriverToolkit{oc: oc, clientset: clientset} +} + +type DriverToolkit struct { + oc *exutil.CLI + clientset clientset.Interface +} + +type OSReleaseInfo struct { + RHCOSVersion string +} + +func (dtk DriverToolkit) GetOSReleaseInfo(t testing.TB, node *corev1.Node) (OSReleaseInfo, error) { + args := []string{ + fmt.Sprintf("node/%s", node.Name), "--quiet", "--", "cat", "/host/etc/os-release", + } + g.By(fmt.Sprintf("calling oc debug %v", args)) + + // TODO: can we get it using the API, if the information is stored in an object + // it can be flaky, maybe add a retry + out, err := dtk.oc.AsAdmin().Run("debug").Args(args...).Output() + if err != nil { + return OSReleaseInfo{}, fmt.Errorf("failed to debug into %s - %w", node.Name, err) + } + t.Logf("output of oc debug:\n%s\n", out) + + sc := bufio.NewScanner(strings.NewReader(out)) + for sc.Scan() { + after, found := strings.CutPrefix(strings.TrimSpace(sc.Text()), "OSTREE_VERSION=") + if !found { + continue + } + return OSReleaseInfo{ + RHCOSVersion: strings.Trim(strings.TrimSpace(after), "'"), + }, nil + } + + return OSReleaseInfo{}, fmt.Errorf("OSTREE_VERSION not found in output") +} diff --git a/test/extended/dra/nvidia/gpu-operator.go b/test/extended/dra/nvidia/gpu-operator.go new file mode 100644 index 000000000000..a2aac1ffb6b3 --- /dev/null +++ b/test/extended/dra/nvidia/gpu-operator.go @@ -0,0 +1,459 @@ +package nvidia + +import ( + "bufio" + "context" + "fmt" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + helper "github.com/openshift/origin/test/extended/dra/helper" +) + +func NewGPUOperatorInstaller(t testing.TB, clientset kubernetes.Interface, f *framework.Framework, p helper.HelmParameters) *GpuOperator { + return &GpuOperator{ + t: t, + clientset: clientset, + installer: helper.NewHelmInstaller(t, p), + f: f, + } +} + +func DefaultNFDHelmValues() map[string]any { + return map[string]any{ 
+ "worker": map[string]any{ + "config": map[string]any{ + "sources": map[string]any{ + "pci": map[string]any{ + "deviceLabelFields": []string{"vendor"}, + }, + "custom": []map[string]any{ + { + "name": "nvidia-gpu-testing", + "labels": map[string]any{ + "nvidia.com": true, + }, + "matchFeatures": []map[string]any{ + { + "feature": "pci.device", + "matchExpressions": map[string]any{ + "class": map[string]any{ + "op": "In", + "value": []string{"0302"}, + }, + "vendor": map[string]any{ + "op": "In", + "value": []string{"10de"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func DefaultGPUOperatorHelmValues() map[string]any { + return map[string]any{ + "devicePlugin": map[string]any{ + // we will use nvidia DRA driver, so disable the plugin + "enabled": false, + }, + "driver": map[string]any{ + "version": "570.148.08", + }, + "cdi": map[string]any{ + "enabled": true, + }, + "toolkit": map[string]any{ + "version": "v1.17.8-ubi8", + }, + "nfd": map[string]any{ + // we hav already installed nfd with custom configuration + "enabled": false, + }, + "platform": map[string]any{ + "openshift": true, + }, + "operator": map[string]any{ + "use_ocp_driver_toolkit": true, + "logging": map[string]any{ + "level": "debug", + }, + }, + } +} + +func DefaultDRADriverHelmValues() map[string]any { + return map[string]any{ + "nvidiaDriverRoot": "/run/nvidia/driver", + "gpuResourcesEnabledOverride": true, + // the controller can run on the master node + "controller": map[string]any{ + "tolerations": []map[string]any{ + { + "key": "node-role.kubernetes.io/master", + "operator": "Exists", + "effect": "NoSchedule", + }, + }, + "affinity": map[string]any{ + "nodeAffinity": map[string]any{ + "requiredDuringSchedulingIgnoredDuringExecution": map[string]any{ + "nodeSelectorTerms": []map[string]any{ + { + "matchExpressions": []map[string]any{ + { + "key": "node-role.kubernetes.io/master", + "operator": "Exists", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +type GpuOperator struct { + t testing.TB + f *framework.Framework + // we don't depend on the clientset from f, esp the clean up code + clientset kubernetes.Interface + installer *helper.HelmInstaller + timeout time.Duration +} + +func (d *GpuOperator) Namespace() string { return d.installer.Namespace } + +func (d *GpuOperator) Install(ctx context.Context) error { + const ( + enforceKey = "pod-security.kubernetes.io/enforce" + enforceValue = "privileged" + ) + client := d.clientset.CoreV1().Namespaces() + current, err := client.Get(ctx, d.installer.Namespace, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + want := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.installer.Namespace, + Labels: map[string]string{ + enforceKey: enforceValue, + }, + }, + } + current, err = client.Create(ctx, want, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("error creating namespace: %w", err) + } + case err != nil: + return fmt.Errorf("error retrieving namespace: %w", err) + } + + if v, ok := current.Labels[enforceKey]; !ok || v != enforceValue { + want := current.DeepCopy() + if len(want.Labels) == 0 { + want.Labels = map[string]string{} + } + want.Labels[enforceKey] = enforceValue + _, err = client.Update(ctx, want, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("error updating namespace: %w", err) + } + } + + return d.installer.Install(ctx) +} + +func (d *GpuOperator) Cleanup(ctx context.Context) error { return d.installer.Remove(ctx) } + +func (d GpuOperator) Ready(ctx context.Context, node *corev1.Node) 
error { + for _, probe := range []struct { + component string + enabled bool + options metav1.ListOptions + }{ + { + enabled: true, + component: "nvidia-driver-daemonset", + options: metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/component" + "=" + "nvidia-driver", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }, + }, + { + enabled: true, + component: "nvidia-container-toolkit-daemonset", + options: metav1.ListOptions{ + LabelSelector: "app" + "=" + "nvidia-container-toolkit-daemonset", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }, + }, + { + enabled: true, + component: "gpu-feature-discovery-daemonset", + options: metav1.ListOptions{ + LabelSelector: "app" + "=" + "gpu-feature-discovery", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }, + }, + } { + if probe.enabled { + g.By(fmt.Sprintf("waiting for %s to be ready", probe.component)) + o.Eventually(ctx, func(ctx context.Context) error { + return helper.PodRunningReady(ctx, d.t, d.clientset, probe.component, d.installer.Namespace, probe.options) + }).WithPolling(5*time.Second).Should(o.BeNil(), fmt.Sprintf("[%s] pod should be ready", probe.component)) + } + } + + return nil +} + +func (d GpuOperator) MIGManagerReady(ctx context.Context, node *corev1.Node) error { + for _, probe := range []struct { + component string + enabled bool + options metav1.ListOptions + }{ + { + enabled: true, + component: "nvidia-mig-manager-daemonset", + options: metav1.ListOptions{ + LabelSelector: "app" + "=" + "nvidia-mig-manager", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }, + }, + } { + if probe.enabled { + g.By(fmt.Sprintf("waiting for %s to be ready", probe.component)) + o.Eventually(ctx, func(ctx context.Context) error { + return helper.PodRunningReady(ctx, d.t, d.clientset, probe.component, d.installer.Namespace, probe.options) + }).WithPolling(5*time.Second).Should(o.BeNil(), fmt.Sprintf("[%s] pod should be ready", probe.component)) + } + } + + return nil +} + +func (d GpuOperator) DiscoverGPUProudct(ctx context.Context, node *corev1.Node) (string, error) { + client := d.clientset.CoreV1().Pods(d.installer.Namespace) + result, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: "app" + "=" + "gpu-feature-discovery", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return "", fmt.Errorf("did not find any pod for %s on node: %s - %w", "gpu-feature-discovery", node.Name, err) + } + pod := result.Items[0].Name + + cmd := []string{"cat", "/etc/kubernetes/node-feature-discovery/features.d/gfd"} + g.By(fmt.Sprintf("exec into pod: %s command: %v", pod, cmd)) + stdout, stderr, err := e2epod.ExecWithOptionsContext(ctx, d.f, e2epod.ExecOptions{ + Command: cmd, + Namespace: d.installer.Namespace, + PodName: pod, + ContainerName: "gpu-feature-discovery", + CaptureStdout: true, + CaptureStderr: true, + }) + if err != nil { + return "", fmt.Errorf("failed to run command %v on pod %s, stdout: %v, stderr: %v, err: %w", cmd, pod, stdout, stderr, err) + } + d.t.Logf("output of pod exec: %s:\n%s\n", pod, stdout) + sc := bufio.NewScanner(strings.NewReader(stdout)) + for sc.Scan() { + after, found := strings.CutPrefix(strings.TrimSpace(sc.Text()), "nvidia.com/gpu.product=") + if !found { + continue + } + return strings.Trim(strings.TrimSpace(after), "'"), nil + } + + return "", fmt.Errorf("nvidia.com/gpu.product not found in output") +} + +func (d GpuOperator) RunNvidiSMI(ctx context.Context, node *corev1.Node, options ...string) ([]string, error) { + client := 
d.clientset.CoreV1().Pods(d.installer.Namespace) + result, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/component" + "=" + "nvidia-driver", + FieldSelector: "spec.nodeName" + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return nil, fmt.Errorf("did not find any pod for %s on node: %s - %w", "nvidia-driver-daemonset", node.Name, err) + } + pod := result.Items[0].Name + + cmd := []string{"nvidia-smi"} + cmd = append(cmd, options...) + g.By(fmt.Sprintf("exec into pod: %s, command: %v", pod, cmd)) + stdout, stderr, err := e2epod.ExecWithOptionsContext(ctx, d.f, e2epod.ExecOptions{ + Command: cmd, + Namespace: d.installer.Namespace, + PodName: pod, + ContainerName: "nvidia-driver-ctr", + CaptureStdout: true, + CaptureStderr: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to run command %v on pod %s, stdout: %v, stderr: %v, err: %w", cmd, pod, stdout, stderr, err) + } + d.t.Logf("output of pod exec: %s:\n%s\n", pod, stdout) + sc := bufio.NewScanner(strings.NewReader(stdout)) + lines := []string{} + for sc.Scan() { + lines = append(lines, sc.Text()) + } + return lines, nil +} + +func (d GpuOperator) ListMIGDevicesUsingNvidiaSMI(ctx context.Context, node *corev1.Node) (NvidiaGPUs, error) { + lines, err := d.RunNvidiSMI(ctx, node, "-L") + if err != nil { + return nil, err + } + return ExtractMIGDeviceInfoFromNvidiaSMILines(lines), nil +} + +func ExtractMIGDeviceInfoFromNvidiaSMILines(lines []string) NvidiaGPUs { + gpus := NvidiaGPUs{} + for _, line := range lines { + gpu := ExtractMIGDeviceInfoFromNvidiaSMI(line) + if gpu.Type != "mig" { + continue + } + gpus = append(gpus, gpu) + } + return gpus +} + +func ExtractMIGDeviceInfoFromNvidiaSMI(line string) NvidiaGPU { + // example line + // GPU 0: NVIDIA A100-SXM4-40GB (UUID: GPU-fcf41002-68b6-5900-d7d1-74026173bb44) + // MIG 3g.20gb Device 0: (UUID: MIG-e07a497d-1bb6-5a42-b670-1c33bf55ab6e) + gpu := NvidiaGPU{} + after, found := strings.CutPrefix(strings.TrimSpace(line), "MIG ") + if !found { + return gpu + } + gpu.Type = "mig" + + split := strings.Split(after, " ") + gpu.Name = split[0] + if len(split) > 1 { + gpu.UUID = strings.TrimRight(split[len(split)-1], ")") + } + return gpu +} + +func QueryGPUUsedByContainer(ctx context.Context, t testing.TB, f *framework.Framework, name, namespace, container string) (NvidiaGPUs, error) { + cmd := []string{"nvidia-smi", "--query-gpu=index,uuid", "--format=csv"} + t.Logf("exec into pod: %s, container: %s, command: %v", name, container, cmd) + stdout, stderr, err := e2epod.ExecWithOptionsContext(ctx, f, e2epod.ExecOptions{ + Command: cmd, + Namespace: namespace, + PodName: name, + ContainerName: container, + CaptureStdout: true, + CaptureStderr: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to run command %v on pod %s, stdout: %v, stderr: %v, err: %w", cmd, name, stdout, stderr, err) + } + t.Logf("output of pod exec: %s/%s (container=%s):\n\n%s\n%s", namespace, name, container, stdout, stderr) + gpus := NvidiaGPUs{} + sc := bufio.NewScanner(strings.NewReader(stdout)) + var ignored bool + for sc.Scan() { + s := strings.Split(sc.Text(), ",") + // ignore the first line, it's the header + if !ignored { + ignored = true + continue + } + if len(s) != 2 { + continue + } + gpus = append(gpus, NvidiaGPU{ + Index: strings.TrimSpace(s[0]), + UUID: strings.TrimSpace(s[1]), + }) + } + return gpus, nil +} + +func (d GpuOperator) QueryCompute(ctx context.Context, node *corev1.Node, gpuIndex string) (NvidiaComputes, error) { + options :=
[]string{"--query-compute-apps=gpu_uuid,pid,process_name", "--format=csv"} + lines, err := d.RunNvidiSMI(ctx, node, options...) + if err != nil { + return nil, err + } + + processes := []NvidiaCompute{} + var firsLineIngonred bool + for _, line := range lines { + // ignore the first line, it's the header + if !firsLineIngonred { + firsLineIngonred = true + continue + } + s := strings.Split(line, ",") + if len(s) != 3 { + continue + } + processes = append(processes, NvidiaCompute{ + GPU: strings.TrimSpace(s[0]), + PID: strings.TrimSpace(s[1]), + Name: strings.TrimSpace(s[2]), + }) + } + return processes, nil +} + +type NvidiaCompute struct { + // process name, and pid + Name string + PID string + // UUID of the GPU on which this process is running + GPU string +} + +func (c NvidiaCompute) String() string { + return fmt.Sprintf("gpu: %s, name: %s, pid: %s", c.GPU, c.Name, c.PID) +} + +type NvidiaComputes []NvidiaCompute + +func (s NvidiaComputes) FilterBy(f func(p NvidiaCompute) bool) NvidiaComputes { + processes := NvidiaComputes{} + for _, p := range s { + if f(p) { + processes = append(processes, p) + } + } + return processes +} + +func (s NvidiaComputes) Names() []string { + names := []string{} + for _, p := range s { + names = append(names, p.Name) + } + return names +} diff --git a/test/extended/dra/nvidia/nvidia-dra-driver.go b/test/extended/dra/nvidia/nvidia-dra-driver.go new file mode 100644 index 000000000000..780d74c49865 --- /dev/null +++ b/test/extended/dra/nvidia/nvidia-dra-driver.go @@ -0,0 +1,220 @@ +package nvidia + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + helper "github.com/openshift/origin/test/extended/dra/helper" +) + +func NewNvidiaDRADriverGPU(t testing.TB, clientset kubernetes.Interface, p helper.HelmParameters) *NvidiaDRADriverGPU { + return &NvidiaDRADriverGPU{ + t: t, + name: "nvidia-dra-driver-gpu", + class: "gpu.nvidia.com", + namespace: p.Namespace, + clientset: clientset, + helm: helper.NewHelmInstaller(t, p), + } +} + +type NvidiaDRADriverGPU struct { + t testing.TB + name string + class string + helm *helper.HelmInstaller + clientset kubernetes.Interface + namespace string +} + +func (d *NvidiaDRADriverGPU) Class() string { return "gpu.nvidia.com" } +func (d *NvidiaDRADriverGPU) Setup(ctx context.Context) error { return d.helm.Install(ctx) } +func (d *NvidiaDRADriverGPU) Cleanup(ctx context.Context) error { return d.helm.Remove(ctx) } +func (d *NvidiaDRADriverGPU) Ready(ctx context.Context, node *corev1.Node) error { + for _, probe := range []struct { + component string + enabled bool + options metav1.ListOptions + }{ + { + enabled: true, + component: d.name, + options: metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/name" + "=" + d.name, + FieldSelector: "spec.nodeName" + "=" + node.Name, + }, + }, + } { + if probe.enabled { + g.By(fmt.Sprintf("waiting for %s to be ready", probe.component)) + o.Eventually(func() error { + return helper.PodRunningReady(ctx, d.t, d.clientset, probe.component, d.helm.Namespace, probe.options) + }).WithPolling(5*time.Second). 
+ WithTimeout(10*time.Minute).Should(o.BeNil(), fmt.Sprintf("[%s] pod should be ready", probe.component)) + } + } + + return nil +} + +func (d *NvidiaDRADriverGPU) EventuallyPublishResources(ctx context.Context, node *corev1.Node) (dc *resourceapi.DeviceClass, slices []resourceapi.ResourceSlice) { + o.Eventually(ctx, func(ctx context.Context) error { + // has the driver published the device class? + obj, err := d.clientset.ResourceV1().DeviceClasses().Get(ctx, d.class, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("still waiting for the driver to advertise its DeviceClass") + } + + result, err := d.clientset.ResourceV1().ResourceSlices().List(ctx, metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.class + "," + + resourceapi.ResourceSliceSelectorNodeName + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return fmt.Errorf("still waiting for the driver to advertise its ResourceSlice - %w", err) + } + dc = obj + slices = result.Items + return nil + }).WithPolling(2*time.Second).Should(o.BeNil(), "timeout while waiting for the driver to advertise its resources") + + return dc, slices +} + +func (d *NvidiaDRADriverGPU) RemovePluginFromNode(ctx context.Context, node *corev1.Node) error { + pods, err := d.clientset.CoreV1().Pods(d.helm.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/name" + "=" + d.name, + FieldSelector: "spec.nodeName" + "=" + node.Name, + }) + if err != nil || len(pods.Items) == 0 { + return fmt.Errorf("[%s] expected a pod running on node: %s - %w", d.name, node.Name, err) + } + + pod := pods.Items[0] + if err := e2epod.DeletePodWithWait(ctx, d.clientset, &pod); err != nil { + return err + } + + slices, err := d.clientset.ResourceV1().ResourceSlices().List(ctx, metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.class + "," + + resourceapi.ResourceSliceSelectorNodeName + "=" + node.Name, + }) + if err != nil || len(slices.Items) == 0 { + return err + } + return d.clientset.ResourceV1().ResourceSlices().Delete(ctx, slices.Items[0].Name, metav1.DeleteOptions{}) +} + +func (d *NvidiaDRADriverGPU) GetGPUFromResourceSlice(ctx context.Context, node *corev1.Node, device string) (NvidiaGPU, error) { + devices, err := d.ListPublishedDevicesFromResourceSlice(ctx, node) + if err != nil { + return NvidiaGPU{}, err + } + if matching := devices.FilterBy(func(gpu NvidiaGPU) bool { return gpu.Name == device }); len(matching) > 0 { + return matching[0], nil + } + return NvidiaGPU{}, nil +} + +func (d *NvidiaDRADriverGPU) GetMPSControlDaemonForClaim(ctx context.Context, node *corev1.Node, claim *resourceapi.ResourceClaim) (*corev1.Pod, error) { + // MpsControlDaemonNameFmt = "mps-control-daemon-%v" // Fill with ClaimUID + // https://github.com/NVIDIA/k8s-dra-driver-gpu/blob/9951be59ac1bf827d02a1f0f3dfc0cea65c7aca3/cmd/gpu-kubelet-plugin/sharing.go#L55 + prefix := fmt.Sprintf("mps-control-daemon-%v", claim.UID) + d.t.Logf("MPS control daemon prefix: %s", prefix) + + // MPS control daemon is located in the same namespace as the driver + result, err := d.clientset.CoreV1().Pods(d.namespace).List(ctx, metav1.ListOptions{ + FieldSelector: "spec.nodeName" + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return nil, fmt.Errorf("did not find MPS control daemon pod for %s on node: %s - %w", claim.Name, node.Name, err) + } + + for i := range result.Items { + pod := &result.Items[i] + if strings.HasPrefix(pod.Name, prefix) {
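+ // the daemon pod name embeds the claim UID (see MpsControlDaemonNameFmt above), so a prefix match uniquely identifies it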
+ return pod, nil + } + } + return nil, nil +} + +func (d *NvidiaDRADriverGPU) ListPublishedDevicesFromResourceSlice(ctx context.Context, node *corev1.Node) (NvidiaGPUs, error) { + result, err := d.clientset.ResourceV1().ResourceSlices().List(ctx, metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.class + "," + + resourceapi.ResourceSliceSelectorNodeName + "=" + node.Name, + }) + if err != nil || len(result.Items) == 0 { + return nil, fmt.Errorf("still waiting for the driver to advertise its ResourceSlice - %w", err) + } + + devices := NvidiaGPUs{} + for _, rs := range result.Items { + for _, got := range rs.Spec.Devices { + gpu := NvidiaGPU{Name: got.Name} + if attribute, ok := got.Attributes["type"]; ok { + gpu.Type = ptr.Deref[string](attribute.StringValue, "") + } + if attribute, ok := got.Attributes["uuid"]; ok { + gpu.UUID = ptr.Deref[string](attribute.StringValue, "") + } + if attribute, ok := got.Attributes["index"]; ok && attribute.IntValue != nil { + gpu.Index = strconv.Itoa(int(*attribute.IntValue)) + } + devices = append(devices, gpu) + } + } + return devices, nil +} + +type NvidiaGPU struct { + Type string + UUID string + Index string + Name string +} + +func (gpu NvidiaGPU) String() string { + return fmt.Sprintf("name: %s, type: %s, uuid: %s, index: %s", gpu.Name, gpu.Type, gpu.UUID, gpu.Index) +} + +type NvidiaGPUs []NvidiaGPU + +func (s NvidiaGPUs) FilterBy(f func(gpu NvidiaGPU) bool) NvidiaGPUs { + gpus := NvidiaGPUs{} + for _, gpu := range s { + if f(gpu) { + gpus = append(gpus, gpu) + } + } + return gpus +} + +func (s NvidiaGPUs) Names() []string { + names := []string{} + for _, gpu := range s { + names = append(names, gpu.Name) + } + return names +} + +func (s NvidiaGPUs) UUIDs() []string { + uuids := []string{} + for _, gpu := range s { + uuids = append(uuids, gpu.UUID) + } + return uuids +} diff --git a/test/extended/dra/specs/cuda-ipc/cuda-ipc-multiple-gpus.yaml b/test/extended/dra/specs/cuda-ipc/cuda-ipc-multiple-gpus.yaml new file mode 100644 index 000000000000..2234bac3c65c --- /dev/null +++ b/test/extended/dra/specs/cuda-ipc/cuda-ipc-multiple-gpus.yaml @@ -0,0 +1,436 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cuda-ipc + labels: + pod-security.kubernetes.io/enforce: privileged +--- +apiVersion: resource.k8s.io/v1 +kind: ResourceClaim +metadata: + namespace: cuda-ipc + name: single-gpu +spec: + devices: + requests: + - name: producer + deviceClassName: gpu.nvidia.com + selectors: + - cel: + expression: | + device.attributes['gpu.nvidia.com'].uuid == "GPU-d1a2be06-94f6-cd5f-0c15-b5558bcf3025" + - name: consumer + deviceClassName: gpu.nvidia.com + selectors: + - cel: + expression: | + device.attributes['gpu.nvidia.com'].uuid == "GPU-06410d4e-8bec-2933-dcc3-ac0497011913" +--- +apiVersion: v1 +kind: Pod +metadata: + name: producer + namespace: cuda-ipc + labels: + app: producer +spec: + hostIPC: true + hostPID: true + containers: + - name: producer + image: nvidia/cuda:12.4.1-devel-ubuntu22.04 + command: ["/bin/bash", "-c"] + startupProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 15 + failureThreshold: 12 + periodSeconds: 10 + args: + - | + echo "Producer: Listing available GPUs with nvidia-smi..." + nvidia-smi -L + echo "Producer: GPU UUIDs:" + nvidia-smi --query-gpu=uuid --format=csv,noheader + echo "Producer: GPU information complete."
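+ # the heredoc below writes a small CUDA program: it allocates device memory, fills it with a + # known pattern, and exports a cudaIpcMemHandle_t through the shared hostPath volume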
+ echo "" + + cat > /tmp/producer.cu << 'EOF' + #include + #include + #include + #include + + int main() { + void* devPtr; + cudaIpcMemHandle_t handle; + int deviceCount; + + printf("Producer: Initializing CUDA...\n"); + fflush(stdout); + + // Check available GPUs + cudaError_t err = cudaGetDeviceCount(&deviceCount); + if (err != cudaSuccess) { + printf("ERROR getting device count: %s\n", cudaGetErrorString(err)); + return 1; + } + printf("Producer: Found %d GPU(s)\n", deviceCount); + + // List all available GPUs + for (int i = 0; i < deviceCount; i++) { + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, i); + printf("Producer: GPU %d: %s\n", i, prop.name); + } + fflush(stdout); + + err = cudaSetDevice(0); + if (err != cudaSuccess) { + printf("ERROR setting CUDA device: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Allocating GPU memory on device 0...\n"); + fflush(stdout); + err = cudaMalloc(&devPtr, 1024 * 1024); // 1MB + if (err != cudaSuccess) { + printf("ERROR allocating GPU memory: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Writing test data to GPU memory...\n"); + fflush(stdout); + int* hostData = (int*)malloc(1024 * 1024); + for (int i = 0; i < 256 * 1024; i++) { + hostData[i] = i + 42; // Simple pattern: index + 42 + } + err = cudaMemcpy(devPtr, hostData, 1024 * 1024, cudaMemcpyHostToDevice); + if (err != cudaSuccess) { + printf("ERROR copying data to GPU: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Creating IPC handle...\n"); + fflush(stdout); + err = cudaIpcGetMemHandle(&handle, devPtr); + if (err != cudaSuccess) { + printf("ERROR creating IPC handle: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Producer: Writing handle to shared volume...\n"); + fflush(stdout); + FILE* f = fopen("/shared/cuda_ipc_handle.dat", "wb"); + if (!f) { + printf("ERROR: Could not open handle file for writing\n"); + return 1; + } + fwrite(&handle, sizeof(handle), 1, f); + fclose(f); + + printf("Producer: Success! Memory contains values 42, 43, 44, 45, 46...\n"); + printf("Producer: Hanging infinitely to keep GPU memory alive...\n"); + fflush(stdout); + + // signal that the producer has created the IPC handle + FILE* ready = fopen("/tmp/ready", "w"); + if (!ready) { + printf("ERROR: Could not open liveness file for writing\n"); + return 1; + } + fclose(ready); + + // Hang forever to keep the GPU memory allocated + while (1) { + sleep(3600); + } + + return 0; + } + EOF + + echo "Compiling producer..." + nvcc /tmp/producer.cu -o /tmp/producer + echo "Starting producer..." 
+ /tmp/producer + resources: + claims: + - name: gpu + request: producer + securityContext: + privileged: true + capabilities: + add: ["IPC_LOCK"] + volumeMounts: + - name: shared-volume + mountPath: /shared + + resourceClaims: + - name: gpu + resourceClaimName: single-gpu + + volumes: + - name: shared-volume + hostPath: + path: /tmp/cuda-ipc-shared-dra + type: DirectoryOrCreate + + tolerations: + - key: "nvidia.com/gpu" + operator: "Exists" + effect: "NoSchedule" + + restartPolicy: Never +--- +apiVersion: v1 +kind: Pod +metadata: + namespace: cuda-ipc + name: consumer + labels: + app: consumer +spec: + hostIPC: true + hostPID: true + containers: + - name: consumer + image: nvidia/cuda:12.4.1-devel-ubuntu22.04 + command: ["/bin/bash", "-c"] + startupProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 15 + failureThreshold: 12 + periodSeconds: 10 + env: + - name: GPU_UUID + value: "06410d4e-8bec-2933-dcc3-ac0497011913" + args: + - | + echo "Consumer: Waiting for producer to create handle..." + + # Wait for the handle file to be created + while [ ! -f /shared/cuda_ipc_handle.dat ]; do + echo "Consumer: Waiting for handle file..." + sleep 2 + done + + echo "Consumer: Handle file found!" + echo "Consumer: Listing available GPUs with nvidia-smi..." + nvidia-smi -L + echo "Consumer: GPU UUIDs:" + nvidia-smi --query-gpu=uuid --format=csv,noheader + echo "Consumer: GPU information complete." + echo "" + sleep 2 + + cat > /tmp/consumer.cu << 'EOF' + #include <stdio.h> + #include <stdlib.h> + #include <string.h> + #include <unistd.h> + + // Helper function to print a cudaUUID_t for verification + void print_uuid(const cudaUUID_t uuid) { + for (int i = 0; i < 16; i++) { + printf("%02x", (unsigned char)uuid.bytes[i]); + if (i == 3 || i == 5 || i == 7 || i == 9) { + printf("-"); + } + } + printf("\n"); + } + + int convert_to_uuid(const char* src, unsigned char* dest) { + // Use sscanf to parse the UUID string with hyphens + int bytes_read; + bytes_read = sscanf(src, + "%02hhx%02hhx%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx-" + "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx", + &dest[0], &dest[1], &dest[2], &dest[3], + &dest[4], &dest[5], + &dest[6], &dest[7], + &dest[8], &dest[9], + &dest[10], &dest[11], &dest[12], &dest[13], &dest[14], &dest[15]); + return bytes_read; + } + + // Helper function to compare two cudaUUID_t structures + bool uuid_equal(cudaUUID_t a, cudaUUID_t b) { + for (int i = 0; i < sizeof(a.bytes); i++) { + if (a.bytes[i] != b.bytes[i]) { + return false; + } + } + return true; + } + + int main() { + const char* uuid_env_var = getenv("GPU_UUID"); + if (uuid_env_var == NULL) { + printf("Error: Environment variable 'GPU_UUID' not set.\n"); + return 1; + } + printf("Read GPU UUID from env: %s\n", uuid_env_var); + + cudaUUID_t uuid; + // A pointer to the byte array inside the cudaUUID_t struct + unsigned char* uuid_bytes = (unsigned char*)uuid.bytes; + int bytes_read; + bytes_read = convert_to_uuid(uuid_env_var, uuid_bytes); + if (bytes_read != 16) { + printf("Error: Failed to parse UUID string.
Expected 16 bytes, but read %d.\n", bytes_read); + return 1; + } + + printf("Converted to cudaUUID_t: "); + print_uuid(uuid); + printf("Consumer: Initializing CUDA...\n"); + fflush(stdout); + + // find the matching GPU + int deviceCount; + cudaError_t err = cudaGetDeviceCount(&deviceCount); + if (err != cudaSuccess) { + printf("ERROR getting device count: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: Found %d GPU(s), finding the matching device index\n", deviceCount); + int device_index = -1; + for (int i = 0; i < deviceCount; i++) { + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, i); + int match = memcmp(prop.uuid.bytes, uuid.bytes, sizeof(uuid.bytes)); + printf("Consumer: GPU index: %d, name: %s, match: %d, uuid: ", i, prop.name, match == 0); + print_uuid(prop.uuid); + if (match == 0) { + device_index = i; + break; + } + } + if (device_index == -1) { + printf("Could not find a CUDA device matching the UUID.\n"); + return 1; + } + + err = cudaSetDevice(device_index); + if (err != cudaSuccess) { + printf("ERROR setting CUDA device: %s\n", cudaGetErrorString(err)); + return 1; + } + + void* devPtr; + cudaIpcMemHandle_t handle; + + printf("Consumer: Reading IPC handle from shared volume...\n"); + fflush(stdout); + FILE* f = fopen("/shared/cuda_ipc_handle.dat", "rb"); + if (!f) { + printf("ERROR: Handle file not found\n"); + return 1; + } + fread(&handle, sizeof(handle), 1, f); + fclose(f); + + printf("Consumer: Opening IPC memory handle...\n"); + fflush(stdout); + err = cudaIpcOpenMemHandle(&devPtr, handle, cudaIpcMemLazyEnablePeerAccess); + if (err != cudaSuccess) { + printf("ERROR opening IPC handle: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: Successfully opened shared GPU memory!\n"); + fflush(stdout); + + // Read the data once + int* hostData = (int*)malloc(1024 * sizeof(int)); + err = cudaMemcpy(hostData, devPtr, 1024 * sizeof(int), cudaMemcpyDeviceToHost); + if (err != cudaSuccess) { + printf("ERROR reading GPU memory: %s\n", cudaGetErrorString(err)); + return 1; + } + + printf("Consumer: First 10 values from shared memory: "); + for (int i = 0; i < 10; i++) { + printf("%d ", hostData[i]); + } + printf("\n"); + fflush(stdout); + + // Verify expected pattern (index + 42) + bool correct = true; + for (int i = 0; i < 1024; i++) { + if (hostData[i] != i + 42) { + correct = false; + break; + } + } + + if (correct) { + printf("Consumer: ✓ Data verification PASSED!\n"); + } else { + printf("Consumer: ✗ Data verification FAILED!\n"); + } + + printf("Consumer: Success! Hanging infinitely...\n"); + fflush(stdout); + + // signal that the consumer has read the data through the IPC handle + FILE* ready = fopen("/tmp/ready", "w"); + if (!ready) { + printf("ERROR: Could not open liveness file for writing\n"); + return 1; + } + fclose(ready); + + // Hang forever + while (1) { + sleep(3600); + } + + return 0; + } + EOF + + echo "Compiling consumer..." + nvcc /tmp/consumer.cu -o /tmp/consumer + echo "Starting consumer..." 
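+ # run the consumer; it opens the handle exported through the shared volume and verifies the data pattern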
+ /tmp/consumer + resources: + claims: + - name: gpu + securityContext: + privileged: true + capabilities: + add: ["IPC_LOCK"] + volumeMounts: + - name: shared-volume + mountPath: /shared + + resourceClaims: + - name: gpu + resourceClaimName: single-gpu + + volumes: + - name: shared-volume + hostPath: + path: /tmp/cuda-ipc-shared-dra + type: DirectoryOrCreate + + tolerations: + - key: "nvidia.com/gpu" + operator: "Exists" + effect: "NoSchedule" + + restartPolicy: Never diff --git a/test/extended/dra/time_slicing.go b/test/extended/dra/time_slicing.go new file mode 100644 index 000000000000..7dfe19ff3b75 --- /dev/null +++ b/test/extended/dra/time_slicing.go @@ -0,0 +1,160 @@ +package dra + +import ( + "context" + "fmt" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// sharing a gpu with TimeSlicing +type gpuTimeSlicingWithCUDASpec struct { + f *framework.Framework + class string + // the node onto which the pod is expected to run + node *corev1.Node + driver *nvidia.GpuOperator + dra *nvidia.NvidiaDRADriverGPU +} + +// One pod, two containers, each container sharing a gpu with TimeSlicing +func (spec gpuTimeSlicingWithCUDASpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + clientset := spec.f.ClientSet + + // TimeSlicing strategy + const ( + timeSlicingStrategy = `{ + "apiVersion": "resource.nvidia.com/v1beta1", + "kind": "GpuConfig", + "sharing": { + "strategy": "TimeSlicing", + "timeSlicingConfig": { + "interval": "Long" + } + } +} +` + ) + + template := &resourceapi.ResourceClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "shared-gpu", + }, + } + template.Spec.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + { + Name: "ts-gpu", + Exactly: &resourceapi.ExactDeviceRequest{DeviceClassName: spec.class}, + }, + } + template.Spec.Spec.Devices.Config = []resourceapi.DeviceClaimConfiguration{ + { + Requests: []string{"ts-gpu"}, + DeviceConfiguration: resourceapi.DeviceConfiguration{ + Opaque: &resourceapi.OpaqueDeviceConfiguration{ + Driver: spec.class, + Parameters: runtime.RawExtension{Raw: []byte(timeSlicingStrategy)}, + }, + }, + }, + } + + newContainer := func(name string) corev1.Container { + ctr := corev1.Container{ + Name: name, + Image: "nvcr.io/nvidia/k8s/cuda-sample:nbody-cuda11.6.0-ubuntu18.04", + Command: []string{"bash", "-c"}, + Args: []string{"trap 'exit 0' TERM; /tmp/sample --benchmark --numbodies=4226048 & wait"}, + } + ctr.Resources.Claims = []corev1.ResourceClaim{{Name: "shared-gpu", Request: "ts-gpu"}} + return ctr + } + // one pod, two containers, each time sharing the device + pod := helper.NewPod(namespace, "pod") + pod.Spec.Containers = []corev1.Container{ + newContainer("ts-ctr0"), + newContainer("ts-ctr1"), + } + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "shared-gpu", + ResourceClaimTemplateName: ptr.To(template.Name), + }, + } + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + + 
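+ // with the TimeSlicing strategy the driver time-shares a single GPU between both containers; unlike MPS, no control daemon is involved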
g.By("creating external claim and pod") + t.Logf("creating resource template: \n%s\n", framework.PrettyPrintJSON(template)) + _, err := clientset.ResourceV1().ResourceClaimTemplates(namespace).Create(ctx, template, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + t.Logf("creating pod: \n%s\n", framework.PrettyPrintJSON(pod)) + pod, err = clientset.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + t.Logf("pod in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(pod)) + + client := clientset.ResourceV1().ResourceClaims(namespace) + result, err := client.List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(result)) + }) + + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, spec.f.ClientSet, pod) + o.Expect(err).To(o.BeNil()) + + // the pod should run on the expected node + pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := helper.GetResourceClaimFor(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + + device, pool, err := helper.GetAllocatedDeviceForRequest("ts-gpu", claim) + o.Expect(err).To(o.BeNil()) + o.Expect(device).ToNot(o.BeEmpty()) + o.Expect(pool).ToNot(o.BeEmpty()) + t.Logf("device %q has been allocated to claim: %q", device, claim.Name) + + // get the index of the gpu allocated to us + gpu, err := spec.dra.GetGPUFromResourceSlice(ctx, spec.node, device) + o.Expect(err).To(o.BeNil()) + o.Expect(gpu.UUID).ToNot(o.BeEmpty()) + o.Expect(gpu.Index).ToNot(o.BeEmpty()) + + lines, err := spec.driver.RunNvidiSMI(ctx, spec.node, "-i", gpu.Index) + o.Expect(err).To(o.BeNil()) + o.Expect(lines).Should(o.ContainElements(o.ContainSubstring("/tmp/sample"), o.ContainSubstring("/tmp/sample"))) + + g.By(fmt.Sprintf("retrieving logs for pod %s/%s", pod.Namespace, pod.Name)) + for _, ctr := range pod.Spec.Containers { + logs, err := helper.GetLogs(ctx, spec.f.ClientSet, pod.Namespace, pod.Name, ctr.Name) + o.Expect(err).To(o.BeNil()) + t.Logf("logs from container: %s\n%s\n", ctr.Name, logs) + } +} diff --git a/test/extended/dra/ts_mps_spec.go b/test/extended/dra/ts_mps_spec.go new file mode 100644 index 000000000000..5b9770fcb572 --- /dev/null +++ b/test/extended/dra/ts_mps_spec.go @@ -0,0 +1,184 @@ +package dra + +import ( + "context" + "fmt" + "testing" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/api/resource/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/test/e2e/framework" + e2epodutil "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/utils/ptr" + + helper "github.com/openshift/origin/test/extended/dra/helper" + nvidia "github.com/openshift/origin/test/extended/dra/nvidia" +) + +// reference: +// - https://github.com/NVIDIA/k8s-dra-driver-gpu/blob/0878a8d3d1f8f2cd36fb09c03e83b5dfb7b39660/demo/specs/quickstart/gpu-test5.yaml +// +// a) this test need at least two gpus +// b) one pod, 4 contaiers, 2 containers time slicing one gpu, +// and 2 containers 
sharing a gpu with MPS +type gpuTimeSlicingAndMPSWithCUDASpec struct { + f *framework.Framework + class string + // the node onto which the pod is expected to run + node *corev1.Node + driver *nvidia.GpuOperator +} + +func (spec gpuTimeSlicingAndMPSWithCUDASpec) Test(ctx context.Context, t testing.TB) { + namespace := spec.f.Namespace.Name + clientset := spec.f.ClientSet + + // create a resource claim template that uses both time slicing and MPS + const ( + timeSlicingStrategy = `{ + "apiVersion": "resource.nvidia.com/v1beta1", + "kind": "GpuConfig", + "sharing": { + "strategy": "TimeSlicing", + "timeSlicingConfig": { + "interval": "Long" + } + } +} +` + mpsStrategy = `{ + "apiVersion": "resource.nvidia.com/v1beta1", + "kind": "GpuConfig", + "sharing": { + "strategy": "MPS", + "mpsConfig": { + "defaultActiveThreadPercentage": 50, + "defaultPinnedDeviceMemoryLimit": "10Gi" + } + } +} +` + ) + + template := &resourceapi.ResourceClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "multiple-gpus", + }, + } + template.Spec.Spec.Devices.Requests = []resourceapi.DeviceRequest{ + {Name: "ts-gpu", Exactly: &resourceapi.ExactDeviceRequest{DeviceClassName: spec.class}}, + {Name: "mps-gpu", Exactly: &resourceapi.ExactDeviceRequest{DeviceClassName: spec.class}}, + } + template.Spec.Spec.Devices.Config = []resourceapi.DeviceClaimConfiguration{ + { + Requests: []string{"ts-gpu"}, + DeviceConfiguration: resourceapi.DeviceConfiguration{ + Opaque: &resourceapi.OpaqueDeviceConfiguration{ + Driver: spec.class, + Parameters: runtime.RawExtension{Raw: []byte(timeSlicingStrategy)}, + }, + }, + }, + { + Requests: []string{"mps-gpu"}, + DeviceConfiguration: resourceapi.DeviceConfiguration{ + Opaque: &resourceapi.OpaqueDeviceConfiguration{ + Driver: spec.class, + Parameters: runtime.RawExtension{Raw: []byte(mpsStrategy)}, + }, + }, + }, + } + + newContainer := func(name string, request string) corev1.Container { + ctr := corev1.Container{ + Name: name, + Image: "nvcr.io/nvidia/k8s/cuda-sample:nbody-cuda11.6.0-ubuntu18.04", + Command: []string{"bash", "-c"}, + Args: []string{"trap 'exit 0' TERM; /tmp/sample --benchmark --numbodies=4226048 & wait"}, + } + ctr.Resources.Claims = []corev1.ResourceClaim{{Name: "shared-gpus", Request: request}} + return ctr + } + + // one pod, and one container for each of the devices + pod := helper.NewPod(namespace, "pod") + pod.Spec.Containers = []corev1.Container{ + newContainer("ts-ctr0", "ts-gpu"), + newContainer("ts-ctr1", "ts-gpu"), + newContainer("mps-ctr0", "mps-gpu"), + newContainer("mps-ctr1", "mps-gpu"), + } + pod.Spec.ResourceClaims = []corev1.PodResourceClaim{ + { + Name: "shared-gpus", + ResourceClaimTemplateName: ptr.To(template.Name), + }, + } + pod.Spec.Tolerations = []corev1.Toleration{ + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + } + + g.By("creating external claim and pod") + t.Logf("creating resource template: \n%s\n", framework.PrettyPrintJSON(template)) + _, err := clientset.ResourceV1().ResourceClaimTemplates(namespace).Create(ctx, template, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + t.Logf("creating pod: \n%s\n", framework.PrettyPrintJSON(pod)) + pod, err = clientset.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).To(o.BeNil()) + + g.DeferCleanup(func(ctx context.Context) { + g.By(fmt.Sprintf("listing resources in namespace: %s", namespace)) + t.Logf("pod in test namespace: %s\n%s", namespace, 
framework.PrettyPrintJSON(pod)) + + client := clientset.ResourceV1().ResourceClaims(namespace) + result, err := client.List(ctx, metav1.ListOptions{}) + o.Expect(err).Should(o.BeNil()) + t.Logf("resource claim in test namespace: %s\n%s", namespace, framework.PrettyPrintJSON(result)) + }) + + g.By(fmt.Sprintf("waiting for pod %s/%s to be running", pod.Namespace, pod.Name)) + err = e2epodutil.WaitForPodRunningInNamespace(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + + // the pod should run on the expected node + pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + o.Expect(err).To(o.BeNil()) + o.Expect(pod.Spec.NodeName).To(o.Equal(spec.node.Name)) + + claim, err := helper.GetResourceClaimFor(ctx, clientset, pod) + o.Expect(err).To(o.BeNil()) + o.Expect(claim).ToNot(o.BeNil()) + + device, pool, err := helper.GetAllocatedDeviceForRequest("mps-gpu", claim) + o.Expect(err).To(o.BeNil()) + o.Expect(device).ToNot(o.BeEmpty()) + o.Expect(pool).ToNot(o.BeEmpty()) + t.Logf("device %q has been allocated to claim: %q", device, claim.Name) + + device, pool, err = helper.GetAllocatedDeviceForRequest("ts-gpu", claim) + o.Expect(err).To(o.BeNil()) + o.Expect(device).ToNot(o.BeEmpty()) + o.Expect(pool).ToNot(o.BeEmpty()) + t.Logf("device %q has been allocated to claim: %q", device, claim.Name) + + _, err = spec.driver.RunNvidiSMI(ctx, spec.node) + o.Expect(err).To(o.BeNil()) + + g.By(fmt.Sprintf("retrieving logs for pod %s/%s", pod.Namespace, pod.Name)) + for _, ctr := range pod.Spec.Containers { + logs, err := helper.GetLogs(ctx, clientset, pod.Namespace, pod.Name, ctr.Name) + o.Expect(err).To(o.BeNil()) + t.Logf("logs from container: %s\n%s\n", ctr.Name, logs) + } +} diff --git a/test/extended/include.go b/test/extended/include.go index 002f778bfda1..2f3fda583eea 100644 --- a/test/extended/include.go +++ b/test/extended/include.go @@ -25,6 +25,7 @@ import ( _ "github.com/openshift/origin/test/extended/deployments" _ "github.com/openshift/origin/test/extended/dns" _ "github.com/openshift/origin/test/extended/dr" + _ "github.com/openshift/origin/test/extended/dra" _ "github.com/openshift/origin/test/extended/etcd" _ "github.com/openshift/origin/test/extended/extension" _ "github.com/openshift/origin/test/extended/idling" diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml new file mode 100644 index 000000000000..a8bc979e02ef --- /dev/null +++ b/vendor/dario.cat/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore new file mode 100644 index 000000000000..45ad0f1ae309 --- /dev/null +++ b/vendor/dario.cat/mergo/.gitignore @@ -0,0 +1,36 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Golang/Intellij +.idea + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag 
files +tags diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml new file mode 100644 index 000000000000..d324c43ba4df --- /dev/null +++ b/vendor/dario.cat/mergo/.travis.yml @@ -0,0 +1,12 @@ +language: go +arch: + - amd64 + - ppc64le +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -race -v ./... +after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..469b44907a09 --- /dev/null +++ b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md new file mode 100644 index 000000000000..0a1ff9f94d85 --- /dev/null +++ b/vendor/dario.cat/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. 
Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. 
If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE new file mode 100644 index 000000000000..686680298da2 --- /dev/null +++ b/vendor/dario.cat/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md new file mode 100644 index 000000000000..0b3c488893b0 --- /dev/null +++ b/vendor/dario.cat/mergo/README.md @@ -0,0 +1,258 @@ +# Mergo + +[![GitHub release][5]][6] +[![GoCard][7]][8] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] + +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.amrom.workers.dev%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.amrom.workers.dev%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.amrom.workers.dev-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +Mergo is stable and frozen, ready for production. Check a short list of the projects using at large scale it [here](https://github.com/imdario/mergo#mergo-in-the-wild). + +No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases. + +### Important notes + +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released. 
+ +If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL: + +``` +replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16 +``` + +#### 0.3.9 + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: + +Donate using Liberapay +Become my sponsor + +### Mergo in the wild + +Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including: + +* [containerd/containerd](https://github.com/containerd/containerd) +* [datadog/datadog-agent](https://github.com/datadog/datadog-agent) +* [docker/cli/](https://github.com/docker/cli/) +* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +* [go-micro/go-micro](https://github.com/go-micro/go-micro) +* [grafana/loki](https://github.com/grafana/loki) +* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +* [masterminds/sprig](github.com/Masterminds/sprig) +* [moby/moby](https://github.com/moby/moby) +* [slackhq/nebula](https://github.com/slackhq/nebula) +* [volcano-sh/volcano](https://github.com/volcano-sh/volcano) + +## Install + + go get dario.cat/mergo + + // use in your .go code + import ( + "dario.cat/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... 
+} +``` + +If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`: + +```go +package main + +import ( + "fmt" + + "dario.cat/mergo" +) + +type Foo struct { + A *string + B int64 +} + +func main() { + first := "first" + second := "second" + src := Foo{ + A: &first, + B: 2, + } + + dest := Foo{ + A: &second, + B: 1, + } + + mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference) +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +Here is a nice example: + +```go +package main + +import ( + "fmt" + "dario.cat/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v3 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "dario.cat/mergo" + "reflect" + "time" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
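An editor's aside, not part of the upstream README: the vendored `merge.go` later in this diff also ships slice-handling options that the README never demonstrates. A minimal sketch of `WithAppendSlice`, with behavior inferred from the vendored source:

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Config struct {
	Hosts []string
}

func main() {
	dst := Config{Hosts: []string{"a.example"}}
	src := Config{Hosts: []string{"b.example", "c.example"}}

	// By default a non-empty dst slice is kept as-is; WithAppendSlice
	// concatenates src onto dst instead (both slices must share a type).
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Hosts) // [a.example b.example c.example]
}
```

Without the option, the merge above would leave `dst.Hosts` at `[a.example]`, since Mergo only fills empty destination fields unless told otherwise.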
+ +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.amrom.workers.dev%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.amrom.workers.dev%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md new file mode 100644 index 000000000000..a5de61f77ba7 --- /dev/null +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go new file mode 100644 index 000000000000..7d96ec0546d8 --- /dev/null +++ b/vendor/dario.cat/mergo/doc.go @@ -0,0 +1,148 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +# Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +# Important notes + +1.0.0 + +In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. + +0.3.9 + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +# Install + +Do your usual installation procedure: + + go get dario.cat/mergo + + // use in your .go code + import ( + "dario.cat/mergo" + ) + +# Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. 
+ + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "dario.cat/mergo" + ) + + type Foo struct { + A string + B int64 + } + + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} + } + +# Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "dario.cat/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { + } + + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } + + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + } + +# Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +# About + +Written by Dario Castañé: https://da.rio.hn + +# License + +BSD 3-Clause license, as Go language. +*/ +package mergo diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go new file mode 100644 index 000000000000..759b4f74fd5a --- /dev/null +++ b/vendor/dario.cat/mergo/map.go @@ -0,0 +1,178 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
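+		// (The same address can come back as a different type, e.g. a struct
+		// and its first field share an address, so each 17*addr bucket chains
+		// visits; the loop above walks that chain before this prepend.)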
+ visited[h] = &visit{typ, seen, addr} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) 
+} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go new file mode 100644 index 000000000000..fd47c95b2b84 --- /dev/null +++ b/vendor/dario.cat/mergo/merge.go @@ -0,0 +1,409 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Transformers Transformers + Overwrite bool + ShouldNotDereference bool + AppendSlice bool + TypeCheck bool + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{typ, seen, addr} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite && dst.CanSet() { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } + } 
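+			// (The src entry is copied below when dst's value for this key is
+			// missing or empty, or when overwrite is set and the src value is
+			// not a pointer.)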
+ + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if src.Elem().Kind() != reflect.Struct { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go new file mode 100644 index 000000000000..0a721e2d8586 --- /dev/null +++ b/vendor/dario.cat/mergo/mergo.go @@ -0,0 +1,81 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. 
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerArgument = errors.New("dst must be a pointer") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + typ reflect.Type + next *visit + ptr uintptr +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 593b10ab6963..194d5e9c94d3 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) { return nextState, err } - switch { - case isOscStringTerminator(b): + // There are several control characters and sequences which can + // terminate an OSC string. Most of them are handled by the baseState + // handler. The ANSI_BEL character is a special case which behaves as a + // terminator only for an OSC string. 
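+	// (In particular, the two-byte ST terminator, ESC \, arrives through the
+	// baseState escape handling, so the raw 0x5C byte no longer needs to be
+	// matched here.)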
+ if b == ANSI_BEL { return oscState.parser.ground, nil } return oscState, nil } - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000000..fe79e3adda29 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000000..01b5743200b8 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000000..235496eeb29e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,120 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). 
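An editor's aside, not part of the upstream README: the `Decoder` documentation later in this diff notes that `time.Duration` fields decode from either an integer (taken as nanoseconds) or a string parsed with `time.ParseDuration`. A minimal sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

type Job struct {
	Timeout time.Duration
	Retry   time.Duration
}

func main() {
	const blob = `
Timeout = "90s"      # parsed with time.ParseDuration
Retry = 5000000000   # integers are taken as nanoseconds
`
	var j Job
	if _, err := toml.Decode(blob, &j); err != nil {
		panic(err)
	}
	fmt.Println(j.Timeout, j.Retry) // 1m30s 5s
}
```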
+ +This library requires Go 1.18 or newer; add it to your go.mod with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Examples +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which can be decoded with: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time +} + +var conf Config +_, err := toml.Decode(tomlData, &conf) +``` + +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. + +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses values in a `mail.Address`: + +```toml +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] +``` + +Can be decoded with: + +```go +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address +} + +func (a *address) UnmarshalText(text []byte) error { + var err error + a.Address, err = mail.ParseAddress(string(text)) + return err +} + +// Decode it. +func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000000..3fa516caa20b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,638 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "io/fs" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(any) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v any) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v any) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. 
+func DecodeFile(path string, v any) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded any + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. +// +// # Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. 
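+//
+// A short sketch of using the returned [MetaData] to report keys that were
+// present in the input but not decoded into the target (conf and blob are
+// hypothetical):
+//
+//	md, err := toml.NewDecoder(strings.NewReader(blob)).Decode(&conf)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, key := range md.Undecoded() {
+//		log.Printf("undecoded key: %s", key)
+//	}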
+func (dec *Decoder) Decode(v any) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := io.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data any, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. 
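+		// (From the caller's side, a Primitive is decoded explicitly later;
+		// a minimal sketch with hypothetical names:
+		//
+		//	var root struct{ Server toml.Primitive }
+		//	md, _ := toml.Decode(blob, &root)
+		//	var server map[string]any
+		//	err := md.PrimitiveDecode(root.Server, &server)
+		//
+		// which defers choosing a Go type for the [server] table until
+		// runtime.)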
+ context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) + } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } + return nil + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]any) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := 
md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data any, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
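+		// For example, both of these decode into a time.Duration field
+		// (hypothetical keys):
+		//
+		//	timeout = "5m10s"     # string: parsed with time.ParseDuration
+		//	timeout = 1500000000  # integer: taken as nanoseconds (1.5s)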
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return md.parseErr(err) + } + return nil +} + +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + d := string(md.data) + return ParseError{ + Message: err.Error(), + err: err, + LastKey: k, + Position: md.keyInfo[k].pos.withCol(d), + Line: md.keyInfo[k].pos.Line, + input: d, + } +} + +func (md *MetaData) e(format string, args ...any) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v any) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). 
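+//
+// For example, decoding into a **string allocates both pointer levels and
+// unifies into the string value, while a *T whose *T implements
+// encoding.TextUnmarshaler is returned as the pointer so that UnmarshalText
+// can be called on a pointer receiver.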
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} + +// fmt %T with "interface {}" replaced with "any", which is far more readable. +func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 000000000000..155709a80b88 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,29 @@ +package toml + +import ( + "encoding" + "io" +) + +// TextMarshaler is an alias for encoding.TextMarshaler. +// +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v any) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000000..82c90a905701 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,8 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
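+//
+// A minimal round trip, as a sketch:
+//
+//	b, _ := toml.Marshal(map[string]any{"answer": 42})
+//	var m map[string]any
+//	_ = toml.Unmarshal(b, &m) // m["answer"] == int64(42)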
+package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000000..ac196e7df881 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,776 @@ +package toml + +import ( + "bufio" + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. 
Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? + w *bufio.Writer +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: bufio.NewWriter(w), Indent: " "} +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v any) error { + rv := eindirect(reflect.ValueOf(v)) + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. 
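+		// The reference-time formats selected below produce, for example:
+		//
+		//	2006-01-02T15:04:05.999999999Z07:00  (offset datetime, RFC 3339)
+		//	2006-01-02T15:04:05.999999999        (local datetime)
+		//	2006-01-02                           (local date)
+		//	15:04:05.999999999                   (local time)
+		//
+		// where the fractional seconds are written only when non-zero.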
+ format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. 
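+//
+// For example, "3" becomes "3.0", while "3.14" is returned unchanged.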
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []reflect.Value + for _, mapKey := range rv.MapKeys() { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, mapKey) + } else { + mapKeysDirect = append(mapKeysDirect, mapKey) + } + } + + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey.String()}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey.String()), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. 
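+	// For example, with the (hypothetical) types
+	//
+	//	type Inner struct{ B int }
+	//	type Outer struct {
+	//		A int
+	//		Inner
+	//	}
+	//
+	// fieldsDirect ends up holding [0] for A and [1 0] for the promoted B.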
+ var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int, totalFields int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != totalFields-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. 
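+//
+// For example: true → tomlBool, int32(1) → tomlInteger, "x" → tomlString,
+// []int{1} → tomlArray, []map[string]int{{"a": 1}} → tomlArrayHash, and
+// time.Time values → tomlDatetime.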
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+
+	if rv.Kind() == reflect.Struct {
+		if rv.Type() == timeType {
+			return tomlDatetime
+		}
+		if isMarshaler(rv) {
+			return tomlString
+		}
+		return tomlHash
+	}
+
+	if isMarshaler(rv) {
+		return tomlString
+	}
+
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if isTableArray(rv) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	default:
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("unreachable")
+	}
+}
+
+func isMarshaler(rv reflect.Value) bool {
+	return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
+}
+
+// isTableArray reports if all entries in the array or slice are a table.
+func isTableArray(arr reflect.Value) bool {
+	if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
+		return false
+	}
+
+	ret := true
+	for i := 0; i < arr.Len(); i++ {
+		tt := tomlTypeOfGo(eindirect(arr.Index(i)))
+		// Don't allow nil.
+		if tt == nil {
+			encPanic(errArrayNilElement)
+		}
+
+		if ret && !typeEqual(tomlHash, tt) {
+			ret = false
+		}
+	}
+	return ret
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Struct:
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Bool:
+		return !rv.Bool()
+	case reflect.Ptr:
+		return rv.IsNil()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//	key = <value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//	┌───────────────────┐
+//	│      ┌───┐  ┌────┐│
+//	v      v   v  v    vv
+//	key = {k = 1, k2 = 2}
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	/// Marshaler used on top-level document;
call eElement() to just call + /// Marshal{TOML,Text}. + if len(key) == 0 { + enc.eElement(val) + return + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...any) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +// Resolve any level of pointers to the actual value (e.g. **string → string). +func eindirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } + return v + } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 000000000000..b7077d3ae33d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,347 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using [ErrorWithPosition]: +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. +// +// At line 4, column 2-7: +// +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ +// +// [ErrorWithUsage] can be used to print the above with some more detailed usage +// guidance: +// +// toml: error: newlines not allowed within inline tables +// +// At line 1, column 18: +// +// 1 | x = [{ key = 42 # +// ^ +// +// Error help: +// +// Inline tables must always be on a single line: +// +// table = {key = 42, second = 43} +// +// It is invalid to split them over multiple lines like so: +// +// # INVALID +// table = { +// key = 42, +// second = 43 +// } +// +// Use regular for this: +// +// [table] +// key = 42 +// second = 43 +type ParseError struct { + Message string // Short technical message. + Usage string // Longer message with usage guidance; may be blank. + Position Position // Position of the error + LastKey string // Last parsed key, may be blank. + + // Line the error occurred. + // + // Deprecated: use [Position]. + Line int + + err error + input string +} + +// Position of an error. +type Position struct { + Line int // Line number, starting at 1. + Col int // Error column, starting at 1. + Start int // Start of error, as byte offset starting at 0. + Len int // Length of the error in bytes. +} + +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. 
+ p.Col = 1 + } + break + } + pos += ll + } + return p +} + +func (pe ParseError) Error() string { + if pe.LastKey == "" { + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) + } + return fmt.Sprintf("toml: line %d (last key %q): %s", + pe.Position.Line, pe.LastKey, pe.Message) +} + +// ErrorWithPosition returns the error with detailed location context. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithPosition() string { + if pe.input == "" { // Should never happen, but just in case. + return pe.Error() + } + + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + + var ( + lines = strings.Split(pe.input, "\n") + b = new(strings.Builder) + ) + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + pe.Message, pe.Position.Line, pe.Position.Col) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) + } + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". + expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errParseDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i any // int or float + size string // "int64", "uint16", etc. 
+ } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. 
+`
+
+const usageUnsafeFloat = `
+This number is outside of the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the below range cannot always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+	f = 2_000_000_000.0
+
+Accuracy ranges:
+
+	float32 =         16,777,215
+	float64 = 9,007,199,254,740,991
+`
+
+const usageDuration = `
+A duration must be given as "number<unit>", without any spaces. Valid units
+are:
+
+	ns         nanoseconds (billionth of a second)
+	us, µs     microseconds (millionth of a second)
+	ms         milliseconds (thousandths of a second)
+	s          seconds
+	m          minutes
+	h          hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+	2006-01-02T15:04:05Z07:00   Date and time, with timezone.
+	2006-01-02T15:04:05         Date and time, but without timezone.
+	2006-01-02                  Date without a time or timezone.
+	15:04:05                    Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+	15:04:05.123
+	15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+//	2006-01-02T15:04Z07:00
+//	2006-01-02T15:04
+//	15:04
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000000..022f15bc2b81
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is because they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
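+//
+// For example, code inside this module could interpret local datetimes,
+// dates, and times as UTC rather than the machine's zone by setting (a
+// sketch; the package is internal, so this is not reachable from importers):
+//
+//	internal.LocalDatetime = time.UTC
+//	internal.LocalDate = time.UTC
+//	internal.LocalTime = time.UTC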
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000000..1c3b4770293c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1272 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemStringEsc + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + esc bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string, tomlNext bool) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError && w == 1 { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. 
+func (lx *lexer) errorf(format string, values ...any) stateFn { + if lx.atEOF { + pos := lx.getPos() + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. 
+// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r, lx.tomlNext) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. 
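+	// For example: '[' starts an array, '{' an inline table, '"' and '\''
+	// strings, and digits or a sign hand off to the number/date lexers below.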
+ r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
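+//
+// For example, in `point = { x = 1, y = 2 }` this state is entered right
+// after the '{' and again after each ',' to read the next key/value pair.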
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. 
+ /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.esc = false + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + lx.esc = true + r := lx.next() + switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. 
+ fallthrough + case '\\': + return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHex(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. 
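+//
+// For example, `1_000` is consumed fully here, while `3.14` and `1e6` hand
+// off to lexFloat at the '.' and 'e'.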
+func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHex(r) { + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. 
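+//
+// For example, given `x = 1 # note`, the text after the '#' up to (but not
+// including) the newline is emitted as a single itemText.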
+func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ, item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } +func isBareKeyChar(r rune, tomlNext bool) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 000000000000..0d337026c19c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,145 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]any + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. 
+// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]any + ok bool + hashOrVal any = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]any); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + // This is called quite often, so it's a bit funky to make it faster. + var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } + } + return b.String() +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue + } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + return k[i] +} + +// Like append(), but only increase the cap by 1. +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. 
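+
+// A minimal usage sketch (assuming the package-level Decode defined
+// elsewhere in this package):
+//
+//	var conf map[string]any
+//	md, err := toml.Decode("a.b = 1", &conf)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	md.IsDefined("a", "b") // true
+//	md.Type("a", "b")      // "Integer"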
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000000..e3ea8a9a2d2e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,845 @@ +package toml + +import ( + "fmt" + "math" + "os" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]any // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
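+	// (UTF-16 encodes plain ASCII as e.g. "a\x00", so a NULL byte this early
+	// is a strong signal the input is not UTF-8.)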
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]any), + lx: lex(data, tomlNext), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + Message: err.Error(), + err: err, + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos.withCol(p.lx.input), + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Line, + LastKey: p.current(), + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...any) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. 
+ vItem := p.next() + val, typ := p.value(vItem, false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemStringEsc, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { + switch it.typ { + case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (any, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (any, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' 
or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + if signbit { + num = math.Copysign(num, -1) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (any, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } + ok = true + break + } + } + if !ok { + p.panicErr(it, errParseDate{it.val}) + } + return t, p.typeOfPrimitive(it) +} + +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. See #338 + array = make([]any, 0, 2) + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + + // XXX: type isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = typ + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { + var ( + topHash = make(map[string]any) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. 
+ k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. + val, typ := p.value(p.next(), false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return topHash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHex is a superset of all the permissible characters surrounding an + // underscore. + accept = isHex(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + /// Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0, len(key)-1) + + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]any) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]any: + hashContext = t[len(t)-1] + case map[string]any: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. 
+ k := key.last() + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]any, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key.last(), make(map[string]any)) + } + p.context = append(p.context, key.last()) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value any) { + var ( + tmpHash any + ok bool + hash = p.mapping + keyContext = make(Key, 0, len(p.context)+1) + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]any: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]any: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
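+//
+// For example, `a.b.c = 1` implicitly creates the "a" and "a.b" tables; a
+// later explicit `[a.b]` may then redefine the implicit table concretely.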
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } + } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 + } +} + +func (p *parser) replaceEscapes(it item, str string) string { + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- + continue + } + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { + p.bug("Escape sequence at end of string.") + return "" + } + switch str[i+1] { + default: + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) + case 'b': + b.WriteByte(0x08) + skip = 1 + case 't': + b.WriteByte(0x09) + skip = 1 + case 'n': + b.WriteByte(0x0a) + skip = 1 + case 'f': + b.WriteByte(0x0c) + skip = 1 + case 'r': + b.WriteByte(0x0d) + skip = 1 + case 'e': + if p.tomlNext { + b.WriteByte(0x1b) + skip = 1 + } + case '"': + b.WriteByte(0x22) + skip = 1 + case '\\': + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. 
+ case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 + } + case 'u': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 + case 'U': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 + } + } + return b.String() +} + +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000000..10c51f7eeb41 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,238 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
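+			// Unexported non-embedded fields and fields whose tag options
+			// request skipping are excluded below.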
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
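+//
+// Concurrent callers may both miss the cache and compute the same fields;
+// whichever write lands last wins, which is harmless because typeFields is
+// deterministic.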
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 000000000000..1c090d331e3f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,65 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. 
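+// +// For example (illustrative, not from the original comment): a lexed integer +// such as `42` arrives as an itemInteger and maps to tomlInteger, while a +// lexed basic string arrives as an itemString and maps to tomlString.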
+func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString, itemStringEsc: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 000000000000..4025e01ec4a9 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 000000000000..d700ec47f2b8 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 000000000000..163ffe72a82d --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. 
It is a Go implementation of some +string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. + +## Issue Reporting +Make suggestions or report issues using the GitHub issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 000000000000..657564a8474d --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 000000000000..8dbd9248583a --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, true) +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+ +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using Go's crypto/rand as its source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z' respectively, so the ASCII printable characters will be used, +unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose from randomly. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) +
count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 000000000000..272670231ab1 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM is a source of randomness seeded with the current time, used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + return Random(count, 0, 0, true, true) +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using the default source of randomness. +This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose from randomly. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using a supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z' respectively, so the ASCII printable characters will be used, +unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose from randomly. If nil, then it will use the set of all chars. + random - a source of randomness.
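+ +Example (an illustrative sketch, not from the original documentation): seeding +with a fixed value makes the output reproducible across runs: + + r := rand.New(rand.NewSource(42)) + s, _ := RandomSeed(8, 0, 0, true, true, nil, r) // same s on every run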
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 000000000000..741bb530e8ad --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespace from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespace. + +Parameter: + str - the string to delete whitespace from, may be empty + +Returns: + the string without whitespace +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
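For example (an illustrative case, not from the original documentation), IndexOfDifference("i am a machine", "i am a robot") returns 7, the index of the first mismatching character.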
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") for str or sub will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the substring was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// DefaultString returns either the passed-in string or, if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// DefaultIfBlank returns either the passed-in string or, if the string is whitespace or empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 000000000000..034cad8e2107 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways.
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs, will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at; less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at; less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string.
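For example (illustrative, not from the original documentation), Capitalize("i am FINE") returns "I Am FINE".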
Only the first letter of each word is changed. +To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm.
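For example (illustrative, not from the original documentation), SwapCase("Hello World") returns "hELLO wORLD".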
+ +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be the delimiter + +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...)
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 000000000000..6b061e6174b3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 000000000000..fbc6332592f6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 000000000000..f95a504fe703 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,242 @@ +# Changelog + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. 
CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications, and API breaking changes would cause +errors which we would need to support. + +The changes in this release are in the handling of the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information, you can start + with the Wikipedia article at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges work the same as in v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there apply only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows the handling expected and requested + by numerous users of this package.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+  might not satisfy the intended compatibility. The change here ignores pre-releases
+  on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+  constraint. For example, `^1.2.3` will ignore pre-releases while
+  `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a version failed a
+  constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 000000000000..9ff7da9c48b6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 000000000000..9ca87a2c79ea --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,31 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 000000000000..ed56936084b9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,258 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+## Package Versions
+
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+  v3 release instead. You can read the documentation for the 1.x.x release
+  [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        panic(err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include pre-releases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering pre-releases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
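+
+As a minimal sketch of the `-0` behavior (assuming `semver` is imported as in
+the examples above):
+
+```go
+pre, _ := semver.NewVersion("1.2.4-beta.1")
+
+c, _ := semver.NewConstraint(">=1.2.3")
+fmt.Println(c.Check(pre)) // false: no pre-release comparator, so pre-releases are skipped
+
+c0, _ := semver.NewConstraint(">=1.2.3-0")
+fmt.Println(c0.Check(pre)) // true: the -0 comparator opts in to pre-releases
+```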
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+A combined sketch of these range operators in action appears below, after the
+Contribute section.
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
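+
+As a supplementary sketch tying the hyphen, wildcard, tilde, and caret ranges
+together (assuming the same `semver` and `fmt` imports as the earlier examples):
+
+```go
+for _, tc := range []struct{ rng, ver string }{
+	{"1.2 - 1.4.5", "1.3.0"}, // hyphen range: >= 1.2 <= 1.4.5
+	{"1.2.x", "1.2.9"},       // wildcard: >= 1.2.0, < 1.3.0
+	{"~1.2.3", "1.2.10"},     // tilde: >= 1.2.3, < 1.3.0
+	{"^0.2.3", "0.2.9"},      // caret below 1.0.0: >= 0.2.3, < 0.3.0
+} {
+	c, err := semver.NewConstraint(tc.rng)
+	if err != nil {
+		panic(err)
+	}
+	v, err := semver.NewVersion(tc.ver)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s satisfies %q: %t\n", tc.ver, tc.rng, c.Check(v)) // all true
+}
+```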
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://github.com/Masterminds/semver)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 000000000000..a30a66b1f747
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x     | :white_check_mark: |
+| 2.x     | :x:                |
+| 1.x     | :x:                |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 000000000000..a78235895fdc
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection: the number of Version instances
+// in the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 000000000000..8461c7ed9038
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,594 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraints that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite - ranges into a comparison operation.
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	or := make([][]*constraint, len(ors))
+	for k, v := range ors {
+
+		// TODO: Find a way to validate and fetch all the constraints in a simpler form
+
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{constraints: or}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. The first time it happens
+	// this var is set.
+	var prerelease bool
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case where the version is
+			// a prerelease and the check is not searching for prereleases.
+			if c.con.pre == "" && v.pre != "" {
+				if !prerelease {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelease = true
+				}
+				joy = false
+
+			} else {
+
+				if _, err := c.check(v); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+	return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+	constraintOps = map[string]cfunc{
+		"":   constraintTildeOrEqual,
+		"=":  constraintTildeOrEqual,
+		"!=": constraintNotEqual,
+		">":  constraintGreaterThan,
+		"<":  constraintLessThan,
+		">=": constraintGreaterThanEqual,
+		"=>": constraintGreaterThanEqual,
+		"<=": constraintLessThanEqual,
+		"=<": constraintLessThanEqual,
+		"~":  constraintTilde,
+		"~>": constraintTilde,
+		"^":  constraintCaret,
+	}
+
+	ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+	constraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(%s)\s*(%s)\s*$`,
+		ops,
+		cvRegex))
+
+	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+		`\s*(%s)\s+-\s+(%s)\s*`,
+		cvRegex, cvRegex))
+
+	findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`(%s)\s*(%s)`,
+		ops,
+		cvRegex))
+
+	// The first time a constraint shows up will look slightly different from
+	// future times it shows up due to a leading space or comma in a given
+	// string.
+	validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+		ops,
+		cvRegex,
+		ops,
+		cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+	// The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0', con is a version instance representing 2.0.0.
+	con *Version
+
+	// The original parsed version (e.g., 4.x from != 4.x)
+	orig string
+
+	// The original operator for the constraint
+	origfunc string
+
+	// When an x is used as part of the version (e.g., 1.x)
+	minorDirty bool
+	dirty      bool
+	patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) (bool, error) {
+	return constraintOps[c.origfunc](v, c)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+	return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+	if len(c) > 0 {
+		m := constraintRegex.FindStringSubmatch(c)
+		if m == nil {
+			return nil, fmt.Errorf("improper constraint: %s", c)
+		}
+
+		cs := &constraint{
+			orig:     m[2],
+			origfunc: m[1],
+		}
+
+		ver := m[2]
+		minorDirty := false
+		patchDirty := false
+		dirty := false
+		if isX(m[3]) || m[3] == "" {
+			ver = fmt.Sprintf("0.0.0%s", m[6])
+			dirty = true
+		} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+			minorDirty = true
+			dirty = true
+			ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+		} else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+			dirty = true
+			patchDirty = true
+			ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+		}
+
+		con, err := NewVersion(ver)
+		if err != nil {
+
+			// The constraintRegex should catch any regex parsing errors. So,
+			// we should never get here.
+			return nil, errors.New("constraint Parser Error")
+		}
+
+		cs.con = con
+		cs.minorDirty = minorDirty
+		cs.patchDirty = patchDirty
+		cs.dirty = dirty
+
+		return cs, nil
+	}
+
+	// The rest is the special case where an empty string was passed in which
+	// is equivalent to * or >=0.0.0
+	con, err := StrictNewVersion("0.0.0")
+	if err != nil {
+
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint Parser Error")
+	}
+
+	cs := &constraint{
+		con:        con,
+		orig:       c,
+		origfunc:   "",
+		minorDirty: false,
+		patchDirty: false,
+		dirty:      true,
+	}
+	return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) (bool, error) {
+	if c.dirty {
+
+		// If there is a pre-release on the version but the constraint isn't looking
+		// for them assume that pre-releases are not compatible. See issue 21 for
+		// more details.
+		if v.Prerelease() != "" && c.con.Prerelease() == "" {
+			return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+		}
+
+		if c.con.Major() != v.Major() {
+			return true, nil
+		}
+		if c.con.Minor() != v.Minor() && !c.minorDirty {
+			return true, nil
+		} else if c.minorDirty {
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		} else if c.con.Patch() != v.Patch() && !c.patchDirty {
+			return true, nil
+		} else if c.patchDirty {
+			// Need to handle prereleases if present
+			if v.Prerelease() != "" || c.con.Prerelease() != "" {
+				eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+				if eq {
+					return true, nil
+				}
+				return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+			}
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		}
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater,
+		// while one of 11.2.1 is
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing prereleases and can use the
+	// Compare function to accomplish that.
+ eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		return constraintTilde(v, c)
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^*      -->  (any)
+// ^1.2.3  -->  >=1.2.3 <2.0.0
+// ^1.2    -->  >=1.2.0 <2.0.0
+// ^1      -->  >=1.0.0 <2.0.0
+// ^0.2.3  -->  >=0.2.3 <0.3.0
+// ^0.2    -->  >=0.2.0 <0.3.0
+// ^0.0.3  -->  >=0.0.3 <0.0.4
+// ^0.0    -->  >=0.0.0 <0.1.0
+// ^0      -->  >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major > 0 is >=x.y.z < x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the con Minor is > 0 it is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the constraint minor is 0 and the version minor > 0 is =0.0.z
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty so we need to check if they are equal. If they are not equal
+	// the constraint is not satisfied.
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s.
Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 000000000000..74f97caa57f8
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			panic(err)
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer valid with the comparison
+    spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^.
For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking.
For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parseable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parseable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 000000000000..ff499fb66404
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,639 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid during
+	// parsing.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+	major, minor, patch uint64
+	pre                 string
+	metadata            string
+	original            string
+}
+
+func init() {
+	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+}
+
+const (
+	num     string = "0123456789"
+	allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract major, minor, and patch + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. 
+func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. 
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less than or equal to another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater than or equal to another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	if v == o {
+		return true
+	}
+	if v == nil || o == nil {
+		return false
+	}
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their part. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. numbers always
+	// have precedence over alphanum. Parsing as Uints because negative numbers
+	// are ignored.
+
+	oi, n1 := strconv.ParseUint(o, 10, 64)
+	si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// When both are strings, compare the strings
+	if n1 != nil && n2 != nil {
+		if s > o {
+			return 1
+		}
+		return -1
+	} else if n1 != nil {
+		// o is a string and s is a number
+		return -1
+	} else if n2 != nil {
+		// s is a string and o is a number
+		return 1
+	}
+	// Both are numbers
+	if si > oi {
+		return 1
+	}
+	return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(comp, r)
+	}) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+	eparts := strings.Split(p, ".")
+	for _, p := range eparts {
+		if containsOnly(p, num) {
+			if len(p) > 1 && p[0] == '0' {
+				return ErrSegmentStartsZero
+			}
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidPrerelease
+		}
+	}
+
+	return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+	eparts := strings.Split(m, ".")
+	for _, p := range eparts {
+		if !containsOnly(p, allowed) {
+			return ErrInvalidMetadata
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore
new file mode 100644
index 000000000000..5e3002f88f51
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
new file mode 100644
index 000000000000..b5ef766a7af8
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
@@ -0,0 +1,401 @@
+# Changelog
+
+## Release 3.3.0 (2024-08-29)
+
+### Added
+
+- #400: added sha512sum function (thanks @itzik-elayev)
+
+### Changed
+
+- #407: Removed duplicate documentation (functions were documented in 2 places)
+- #290: Corrected copy/paste oops in math documentation (thanks @zzhu41)
+- #369: Corrected template reference in docs (thanks @chey)
+- #375: Added link to URL documentation (thanks @carlpett)
+- #406: Updated the mergo dependency which had a breaking change (which was accounted for)
+- #376: Fixed documentation error (thanks @jheyduk)
+- #404: Updated dependency tree
+- #391: Fixed misspelling (thanks @chrishalbert)
+- #405: Updated Go versions used in testing
+
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`;
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+  (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+  ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer.
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via the 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version of mergo
+newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+  as it causes a breaking change for sprig. That issue is tracked at
+  https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies.
Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+  use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds adler32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older.
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking GitHub Pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
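
The functions catalogued in the releases above compose through template pipelines. As a hedged sketch of that style (assuming Sprig's FuncMap is installed in the template engine; `.Tag` and `.Name` are illustrative data fields, not part of the library):

```
{{ if regexMatch "^v[0-9]" .Tag }}{{ .Tag | trimPrefix "v" }}{{ end }}
{{ .Name | default "unnamed" | lower }}
```
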
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 000000000000..f311b1eaaaa8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 000000000000..78d409cde2c5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
new file mode 100644
index 000000000000..3e22c60e1a01
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/README.md
@@ -0,0 +1,100 @@
+# Sprig: Template functions for Go templates
+
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but not
+very many template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## IMPORTANT NOTES
+
+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
+its v0.3.9 release, there was a behavior change that impacts merging template
+functions in sprig. It is currently recommended to use v0.3.10 or later of that package.
+Using v0.3.9 will cause sprig tests to fail.
+
+## Package Versions
+
+There are two active major versions of the `sprig` package.
+
+* v3 is the current stable release series on the `master` branch. Its Go API
+  should remain compatible with v2. Behavior changes behind some functions are
+  the reason for the new major version.
+* v2 is the previous stable release series. It has been more than three years since
+  the initial release of v2. You can read the documentation and see the code
+  on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
+  Bug fixes to this major version will continue for some time.
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+import (
+	"html/template"
+
+	"github.com/Masterminds/sprig/v3"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 000000000000..75fe027e4d36 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,659 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha512sum(input string) string { + hash := sha512.Sum512([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), 
[]byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password))
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	// Pad the plaintext to a whole number of blocks (PKCS#7-style padding).
+	content := []byte(plaintext)
+	blockSize := block.BlockSize()
+	padding := blockSize - len(content)%blockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	content = append(content, padtext...)
+
+	ciphertext := make([]byte, aes.BlockSize+len(content))
+
+	// The random IV occupies the first block of the output.
+	iv := ciphertext[:aes.BlockSize]
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return "", err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
+
+	return base64.StdEncoding.EncodeToString(ciphertext), nil
+}
+
+func decryptAES(password string, crypt64 string) (string, error) {
+	if crypt64 == "" {
+		return "", nil
+	}
+
+	key := make([]byte, 32)
+	copy(key, []byte(password))
+
+	crypt, err := base64.StdEncoding.DecodeString(crypt64)
+	if err != nil {
+		return "", err
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	// Split off the IV, decrypt, then strip the padding added by encryptAES.
+	iv := crypt[:aes.BlockSize]
+	crypt = crypt[aes.BlockSize:]
+	decrypted := make([]byte, len(crypt))
+	mode := cipher.NewCBCDecrypter(block, iv)
+	mode.CryptBlocks(decrypted, crypt)
+
+	return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go
new file mode 100644
index 000000000000..ed022ddacac0
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+	"strconv"
+	"time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter cases, it is treated as seconds since the UNIX
+// epoch.
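+//
+// Illustrative usage (not part of the upstream file; assumes the FuncMap
+// is installed in the template engine):
+//
+//	{{ now | date "2006-01-02" }}        // e.g. "2024-08-29"
+//	{{ 1136239445 | date "15:04:05" }}   // an int counts seconds since the epoch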
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 000000000000..b9f979666dd3 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. 
+// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
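+//
+// Illustrative (not part of the upstream file): for a dict {"k": "<b>"},
+// toJson renders {"k":"\u003cb\u003e"} while toRawJson renders {"k":"<b>"}.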
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 000000000000..4315b3542adb --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "dario.cat/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
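+			// (Descriptive note, not upstream: a failed merge renders as the
+			// empty string so template execution continues; callers that need
+			// the error can use mustMergeOverwrite below instead.)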
+			return ""
+		}
+	}
+	return dst
+}
+
+func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
+	for _, src := range srcs {
+		if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+			return nil, err
+		}
+	}
+	return dst, nil
+}
+
+func values(dict map[string]interface{}) []interface{} {
+	values := []interface{}{}
+	for _, value := range dict {
+		values = append(values, value)
+	}
+
+	return values
+}
+
+func deepCopy(i interface{}) interface{} {
+	c, err := mustDeepCopy(i)
+	if err != nil {
+		panic("deepCopy error: " + err.Error())
+	}
+
+	return c
+}
+
+func mustDeepCopy(i interface{}) (interface{}, error) {
+	return copystructure.Copy(i)
+}
+
+func dig(ps ...interface{}) (interface{}, error) {
+	if len(ps) < 3 {
+		panic("dig needs at least three arguments")
+	}
+	dict := ps[len(ps)-1].(map[string]interface{})
+	def := ps[len(ps)-2]
+	ks := make([]string, len(ps)-2)
+	for i := 0; i < len(ks); i++ {
+		ks[i] = ps[i].(string)
+	}
+
+	return digFromDict(dict, def, ks)
+}
+
+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
+	k, ns := ks[0], ks[1:len(ks)]
+	step, has := dict[k]
+	if !has {
+		return d, nil
+	}
+	if len(ns) == 0 {
+		return step, nil
+	}
+	return digFromDict(step.(map[string]interface{}), d, ns)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
new file mode 100644
index 000000000000..91031d6d197e
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+In several cases, Sprig reverses the order of arguments from the way they
+appear in the standard library. This is to make it easier to pipe
+arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
new file mode 100644
index 000000000000..cda47d26f27b
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go
@@ -0,0 +1,385 @@
+package sprig
+
+import (
+	"errors"
+	"html/template"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	ttemplate "text/template"
+	"time"
+
+	util "github.com/Masterminds/goutils"
+	"github.com/huandu/xstrings"
+	"github.com/shopspring/decimal"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+func FuncMap() template.FuncMap {
+	return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+	r := TxtFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
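+//
+// Illustrative (not part of the upstream file): the hermetic maps drop
+// functions such as env, now, randAlpha, and uuidv4, so rendering the same
+// input twice produces identical output:
+//
+//	tpl := template.Must(template.New("t").
+//		Funcs(sprig.HermeticHtmlFuncMap()).
+//		Parse(`{{ .Name | upper }}`))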
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + // camelcase used to call xstrings.ToCamelCase, but that function had a breaking change in version + // 1.5 that moved it from upper camel case to lower camel case. This is a breaking change for sprig. + // A new xstrings.ToPascalCase function was added that provided upper camel case. 
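+	// Illustrative (not upstream): {{ "http_server" | camelcase }} therefore
+	// yields "HttpServer" (upper camel case), as it did before xstrings 1.5.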
+ "camelcase": xstrings.ToPascalCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "sha512sum": sha512sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
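+	// Illustrative (not upstream): {{ list "b" "a" | sortAlpha | join "," }}
+	// renders "a,b"; the piped list arrives as the final argument.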
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + 
"urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 000000000000..ca0fbb789328 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return 
l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
+	tp := reflect.TypeOf(list).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(list)
+
+		l := l2.Len()
+		res := []interface{}{}
+		var item interface{}
+		for i := 0; i < l; i++ {
+			item = l2.Index(i).Interface()
+			if !inList(omit, item) {
+				res = append(res, item)
+			}
+		}
+
+		return res, nil
+	default:
+		return nil, fmt.Errorf("Cannot find without on type %s", tp)
+	}
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+	l, err := mustHas(needle, haystack)
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustHas(needle interface{}, haystack interface{}) (bool, error) {
+	if haystack == nil {
+		return false, nil
+	}
+	tp := reflect.TypeOf(haystack).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(haystack)
+		var item interface{}
+		l := l2.Len()
+		for i := 0; i < l; i++ {
+			item = l2.Index(i).Interface()
+			if reflect.DeepEqual(needle, item) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	default:
+		return false, fmt.Errorf("Cannot find has on type %s", tp)
+	}
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list     -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3   -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+	l, err := mustSlice(list, indices...)
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+	tp := reflect.TypeOf(list).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(list)
+
+		l := l2.Len()
+		if l == 0 {
+			return nil, nil
+		}
+
+		var start, end int
+		if len(indices) > 0 {
+			start = toInt(indices[0])
+		}
+		if len(indices) < 2 {
+			end = l
+		} else {
+			end = toInt(indices[1])
+		}
+
+		return l2.Slice(start, end).Interface(), nil
+	default:
+		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+	}
+}
+
+func concat(lists ...interface{}) interface{} {
+	var res []interface{}
+	for _, list := range lists {
+		tp := reflect.TypeOf(list).Kind()
+		switch tp {
+		case reflect.Slice, reflect.Array:
+			l2 := reflect.ValueOf(list)
+			for i := 0; i < l2.Len(); i++ {
+				res = append(res, l2.Index(i).Interface())
+			}
+		default:
+			panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go
new file mode 100644
index 000000000000..108d78a94627
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+	"math/rand"
+	"net"
+)
+
+func getHostByName(name string) string {
+	addrs, _ := net.LookupHost(name)
+	//TODO: add error handling when release v3 comes out
+	return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go
new file mode 100644
index 000000000000..f68e4182ee60
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go
@@ -0,0 +1,186 @@
+package sprig
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/spf13/cast"
+	"github.com/shopspring/decimal"
+)
+
+// toFloat64 converts 64-bit floats
+func toFloat64(v interface{}) float64 {
+	return cast.ToFloat64(v)
+}
+
+// toInt converts values to ints
+func toInt(v interface{}) int {
+	return cast.ToInt(v)
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+	return cast.ToInt64(v)
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb > aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Max(aa, bb)
+	}
+	return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb < aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Min(aa, bb)
+	}
+	return aa
+}
+
+func until(count int) []int {
+	step := 1
+	if count < 0 {
+		step = -1
+	}
+	return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+	v := []int{}
+
+	if stop < start {
+		if step >= 0 {
+			return v
+		}
+		for i := start; i > stop; i += step {
+			v = append(v, i)
+		}
+		return v
+	}
+
+	if step <= 0 {
+		return v
+	}
+	for i := start; i < stop; i += step {
+		v = append(v, i)
+	}
+	return v
+}
+
+func floor(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+	roundOn := .5
+	if len(rOpt) > 0 {
+		roundOn = rOpt[0]
+	}
+	val := toFloat64(a)
+	places := toFloat64(p)
+
+	var round float64
+	pow := math.Pow(10, places)
+	digit := pow * val
+	_, div := math.Modf(digit)
+	if div >= roundOn {
+		round = math.Ceil(digit)
+	} else {
+		round = math.Floor(digit)
+	}
+	return round / pow
+}
+
+// toDecimal converts a unix-style octal string (e.g. a file mode) to its decimal value
+func toDecimal(v interface{}) int64 {
+	result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+	if err != nil {
+		return 0
+	}
+	return result
+}
+
+func seq(params ...int) string {
+	increment := 1
+	switch len(params) {
+	case 0:
+		return ""
+	case 1:
+		start := 1
+		end := params[0]
+		if end < start {
+			increment = -1
+		}
+		return intArrayToString(untilStep(start, end+increment, increment), " ")
+	case 3:
+		start := params[0]
+		end := params[2]
+		step := params[1]
+		if end < start {
+			increment = -1
+			if step > 0 {
+				return ""
+			}
+		}
+		return intArrayToString(untilStep(start, end+increment, step), " ")
+	case 2:
+		start := params[0]
+		end := params[1]
+		step := 1
+		if end < start {
+			step = -1
+		}
+		return intArrayToString(untilStep(start, end+step, step), " ")
+	default:
+		return ""
+	}
+}
+
+func intArrayToString(slice []int, delimiter string) string {
+	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
+}
+
+// execDecimalOp performs a float and subsequent decimal.Decimal conversion on its inputs,
+// and iterates through a and b executing the mathematical operation f
+func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 {
+	prt := decimal.NewFromFloat(toFloat64(a))
+	for _, x := range b {
+		dx := decimal.NewFromFloat(toFloat64(x))
+		prt = f(prt, dx)
+	}
+	rslt, _ := prt.Float64()
+	return rslt
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go
new file mode 100644
index 000000000000..8a65c132f08f
--- /dev/null
+++
b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 000000000000..fab551018977 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 000000000000..3fbe08aa6376 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + 
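+// Example (illustrative, not part of upstream sprig): in a template,
+// {{ semverCompare "^1.2.0" "1.2.3" }} checks the version against the
+// constraint via Masterminds/semver and renders true, while
+// {{ semver "1.2.3" }} exposes the parsed *semver.Version.
+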
+func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 000000000000..e0ae628c8417 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+}
+
+func indent(spaces int, v string) string {
+	pad := strings.Repeat(" ", spaces)
+	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+	return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+	if count == 1 {
+		return one
+	}
+	return many
+}
+
+func strslice(v interface{}) []string {
+	switch v := v.(type) {
+	case []string:
+		return v
+	case []interface{}:
+		b := make([]string, 0, len(v))
+		for _, s := range v {
+			if s != nil {
+				b = append(b, strval(s))
+			}
+		}
+		return b
+	default:
+		val := reflect.ValueOf(v)
+		switch val.Kind() {
+		case reflect.Array, reflect.Slice:
+			l := val.Len()
+			b := make([]string, 0, l)
+			for i := 0; i < l; i++ {
+				value := val.Index(i).Interface()
+				if value != nil {
+					b = append(b, strval(value))
+				}
+			}
+			return b
+		default:
+			if v == nil {
+				return []string{}
+			}
+
+			return []string{strval(v)}
+		}
+	}
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+	newSlice := make([]interface{}, 0, len(v))
+	for _, i := range v {
+		if i != nil {
+			newSlice = append(newSlice, i)
+		}
+	}
+	return newSlice
+}
+
+func strval(v interface{}) string {
+	switch v := v.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	case error:
+		return v.Error()
+	case fmt.Stringer:
+		return v.String()
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+func trunc(c int, s string) string {
+	if c < 0 && len(s)+c > 0 {
+		return s[len(s)+c:]
+	}
+	if c >= 0 && len(s) > c {
+		return s[:c]
+	}
+	return s
+}
+
+func join(sep string, v interface{}) string {
+	return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+	parts := strings.Split(orig, sep)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+	parts := strings.SplitN(orig, sep, n)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end is greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
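+//
+// For example, substring(1, 3, "hello") returns "el", and
+// substring(1, -1, "hello") returns "ello".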
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 000000000000..b8e120e19ba4 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore new file mode 100644 index 000000000000..4a0699f0b7f3 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.gitignore @@ -0,0 +1 @@ +squirrel.test \ No newline at end of file diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml new file mode 100644 index 000000000000..7bb6da4878bf --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.11.x + - 1.12.x + - 1.13.x + +services: + - mysql + - postgresql + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +before_script: + - mysql -e 'CREATE DATABASE squirrel;' + - psql -c 'CREATE DATABASE squirrel;' -U postgres + +script: + - go test + - cd integration + - go test -args -driver sqlite3 + - go test -args -driver mysql -dataSource travis@/squirrel + - go test -args -driver postgres -dataSource 'postgres://postgres@localhost/squirrel?sslmode=disable' + +notifications: + irc: "irc.freenode.net#masterminds" diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE b/vendor/github.com/Masterminds/squirrel/LICENSE new file mode 100644 index 000000000000..b459007fd88f --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Squirrel: The Masterminds +Copyright (c) 2014-2015, Lann Martin. Copyright (C) 2015-2016, Google. Copyright (C) 2015, Matt Farina and Matt Butcher. + + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md new file mode 100644 index 000000000000..1d37f282f1cd --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/README.md @@ -0,0 +1,142 @@ +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +### Squirrel is "complete". +Bug fixes will still be merged (slowly). Bug reports are welcome, but I will not necessarily respond to them. If another fork (or substantially similar project) actively improves on what Squirrel does, let me know and I may link to it here. 
+ + +# Squirrel - fluent SQL generator for Go + +```go +import "github.com/Masterminds/squirrel" +``` + + +[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel) +[![Build Status](https://api.travis-ci.org/Masterminds/squirrel.svg?branch=master)](https://travis-ci.org/Masterminds/squirrel) + +**Squirrel is not an ORM.** For an application of Squirrel, check out +[structable, a table-struct mapper](https://github.com/Masterminds/structable) + + +Squirrel helps you build SQL queries from composable parts: + +```go +import sq "github.com/Masterminds/squirrel" + +users := sq.Select("*").From("users").Join("emails USING (email_id)") + +active := users.Where(sq.Eq{"deleted_at": nil}) + +sql, args, err := active.ToSql() + +sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL" +``` + +```go +sql, args, err := sq. + Insert("users").Columns("name", "age"). + Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)). + ToSql() + +sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? + 5)" +``` + +Squirrel can also execute queries directly: + +```go +stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}}) +three_stooges := stooges.Limit(3) +rows, err := three_stooges.RunWith(db).Query() + +// Behaves like: +rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3", + "moe", "larry", "curly", "shemp") +``` + +Squirrel makes conditional query building a breeze: + +```go +if len(q) > 0 { + users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%")) +} +``` + +Squirrel wants to make your life easier: + +```go +// StmtCache caches Prepared Stmts for you +dbCache := sq.NewStmtCache(db) + +// StatementBuilder keeps your syntax neat +mydb := sq.StatementBuilder.RunWith(dbCache) +select_users := mydb.Select("*").From("users") +``` + +Squirrel loves PostgreSQL: + +```go +psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar) + +// You use question marks for placeholders... +sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna").ToSql() + +/// ...squirrel replaces them using PlaceholderFormat. +sql == "SELECT * FROM elephants WHERE name IN ($1,$2)" + + +/// You can retrieve id ... +query := sq.Insert("nodes"). + Columns("uuid", "type", "data"). + Values(node.Uuid, node.Type, node.Data). + Suffix("RETURNING \"id\""). + RunWith(m.db). + PlaceholderFormat(sq.Dollar) + +query.QueryRow().Scan(&node.id) +``` + +You can escape question marks by inserting two question marks: + +```sql +SELECT * FROM nodes WHERE meta->'format' ??| array[?,?] +``` + +will generate with the Dollar Placeholder: + +```sql +SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2] +``` + +## FAQ + +* **How can I build an IN query on composite keys / tuples, e.g. `WHERE (col1, col2) IN ((1,2),(3,4))`? ([#104](https://github.com/Masterminds/squirrel/issues/104))** + + Squirrel does not explicitly support tuples, but you can get the same effect with e.g.: + + ```go + sq.Or{ + sq.Eq{"col1": 1, "col2": 2}, + sq.Eq{"col1": 3, "col2": 4}} + ``` + + ```sql + WHERE (col1 = 1 AND col2 = 2) OR (col1 = 3 AND col2 = 4) + ``` + + (which should produce the same query plan as the tuple version) + +* **Why doesn't `Eq{"mynumber": []uint8{1,2,3}}` turn into an `IN` query? 
([#114](https://github.com/Masterminds/squirrel/issues/114))**

  Values of type `[]byte` are handled specially by `database/sql`. In Go, [`byte` is just an alias of `uint8`](https://golang.org/pkg/builtin/#byte), so there is no way to distinguish `[]uint8` from `[]byte`.

* **Some features are poorly documented!**

  This isn't a frequent complaints section!

* **Some features are poorly documented?**

  Yes. The tests should be considered a part of the documentation; take a look at those for ideas on how to express more complex queries.

## License

Squirrel is released under the
[MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go
new file mode 100644
index 000000000000..299e14b9d4a0
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/case.go
@@ -0,0 +1,128 @@
+package squirrel
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/lann/builder"
+)
+
+func init() {
+	builder.Register(CaseBuilder{}, caseData{})
+}
+
+// sqlizerBuffer is a helper that allows writing many Sqlizers one by one
+// without constant checks for errors that may come from a Sqlizer
+type sqlizerBuffer struct {
+	bytes.Buffer
+	args []interface{}
+	err  error
+}
+
+// WriteSql converts a Sqlizer to a SQL string and writes it to the buffer
+func (b *sqlizerBuffer) WriteSql(item Sqlizer) {
+	if b.err != nil {
+		return
+	}
+
+	var str string
+	var args []interface{}
+	str, args, b.err = nestedToSql(item)
+
+	if b.err != nil {
+		return
+	}
+
+	b.WriteString(str)
+	b.WriteByte(' ')
+	b.args = append(b.args, args...)
+}
+
+func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) {
+	return b.String(), b.args, b.err
+}
+
+// whenPart is a helper structure to describe SQL's "WHEN ... THEN ..." expression
+type whenPart struct {
+	when Sqlizer
+	then Sqlizer
+}
+
+func newWhenPart(when interface{}, then interface{}) whenPart {
+	return whenPart{newPart(when), newPart(then)}
+}
+
+// caseData holds all the data required to build a CASE SQL construct
+type caseData struct {
+	What      Sqlizer
+	WhenParts []whenPart
+	Else      Sqlizer
+}
+
+// ToSql implements Sqlizer
+func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.WhenParts) == 0 {
+		err = errors.New("case expression must contain at least one WHEN clause")
+
+		return
+	}
+
+	sql := sqlizerBuffer{}
+
+	sql.WriteString("CASE ")
+	if d.What != nil {
+		sql.WriteSql(d.What)
+	}
+
+	for _, p := range d.WhenParts {
+		sql.WriteString("WHEN ")
+		sql.WriteSql(p.when)
+		sql.WriteString("THEN ")
+		sql.WriteSql(p.then)
+	}
+
+	if d.Else != nil {
+		sql.WriteString("ELSE ")
+		sql.WriteSql(d.Else)
+	}
+
+	sql.WriteString("END")
+
+	return sql.ToSql()
+}
+
+// CaseBuilder builds a SQL CASE construct which can be used as part of a query.
+type CaseBuilder builder.Builder
+
+// ToSql builds the query into a SQL string and bound args.
+func (b CaseBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(caseData)
+	return data.ToSql()
+}
+
+// MustSql builds the query into a SQL string and bound args.
+// It panics if there are any errors.
+func (b CaseBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// what sets the optional value for the CASE construct: "CASE [value] ..."
+func (b CaseBuilder) what(expr interface{}) CaseBuilder {
+	return builder.Set(b, "What", newPart(expr)).(CaseBuilder)
+}
+
+// When adds a "WHEN ... THEN ..." part to the CASE construct
+func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder {
+	// TODO: performance hint: replace slice of WhenPart with just slice of parts
+	// where even indices of the slice belong to "when"s and odd indices belong to "then"s
+	return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder)
+}
+
+// Else sets the optional "ELSE ..." part for the CASE construct
+func (b CaseBuilder) Else(expr interface{}) CaseBuilder {
+	return builder.Set(b, "Else", newPart(expr)).(CaseBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go
new file mode 100644
index 000000000000..f3f31e63ef10
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/delete.go
@@ -0,0 +1,191 @@
+package squirrel
+
+import (
+	"bytes"
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/lann/builder"
+)
+
+type deleteData struct {
+	PlaceholderFormat PlaceholderFormat
+	RunWith           BaseRunner
+	Prefixes          []Sqlizer
+	From              string
+	WhereParts        []Sqlizer
+	OrderBys          []string
+	Limit             string
+	Offset            string
+	Suffixes          []Sqlizer
+}
+
+func (d *deleteData) Exec() (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return ExecWith(d.RunWith, d)
+}
+
+func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.From) == 0 {
+		err = fmt.Errorf("delete statements must specify a From table")
+		return
+	}
+
+	sql := &bytes.Buffer{}
+
+	if len(d.Prefixes) > 0 {
+		args, err = appendToSql(d.Prefixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+
+		sql.WriteString(" ")
+	}
+
+	sql.WriteString("DELETE FROM ")
+	sql.WriteString(d.From)
+
+	if len(d.WhereParts) > 0 {
+		sql.WriteString(" WHERE ")
+		args, err = appendToSql(d.WhereParts, sql, " AND ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	if len(d.OrderBys) > 0 {
+		sql.WriteString(" ORDER BY ")
+		sql.WriteString(strings.Join(d.OrderBys, ", "))
+	}
+
+	if len(d.Limit) > 0 {
+		sql.WriteString(" LIMIT ")
+		sql.WriteString(d.Limit)
+	}
+
+	if len(d.Offset) > 0 {
+		sql.WriteString(" OFFSET ")
+		sql.WriteString(d.Offset)
+	}
+
+	if len(d.Suffixes) > 0 {
+		sql.WriteString(" ")
+		args, err = appendToSql(d.Suffixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
+	return
+}
+
+// Builder
+
+// DeleteBuilder builds SQL DELETE statements.
+type DeleteBuilder builder.Builder
+
+func init() {
+	builder.Register(DeleteBuilder{}, deleteData{})
+}
+
+// Format methods
+
+// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
+// query.
+func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder {
+	return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder)
+}
+
+// Runner methods
+
+// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
+func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder {
+	return setRunWith(b, runner).(DeleteBuilder)
+}
+
+// Exec builds and Execs the query with the Runner set by RunWith.
+func (b DeleteBuilder) Exec() (sql.Result, error) {
+	data := builder.GetStruct(b).(deleteData)
+	return data.Exec()
+}
+
+// SQL methods
+
+// ToSql builds the query into a SQL string and bound args.
+func (b DeleteBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(deleteData)
+	return data.ToSql()
+}
+
+// MustSql builds the query into a SQL string and bound args.
+// It panics if there are any errors.
+func (b DeleteBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// Prefix adds an expression to the beginning of the query
+func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder {
+	return b.PrefixExpr(Expr(sql, args...))
+}
+
+// PrefixExpr adds an expression to the very beginning of the query
+func (b DeleteBuilder) PrefixExpr(expr Sqlizer) DeleteBuilder {
+	return builder.Append(b, "Prefixes", expr).(DeleteBuilder)
+}
+
+// From sets the table to be deleted from.
+func (b DeleteBuilder) From(from string) DeleteBuilder {
+	return builder.Set(b, "From", from).(DeleteBuilder)
+}
+
+// Where adds WHERE expressions to the query.
+//
+// See SelectBuilder.Where for more information.
+func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder {
+	return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder)
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
+func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder {
+	return b.SuffixExpr(Expr(sql, args...))
+}
+
+// SuffixExpr adds an expression to the end of the query
+func (b DeleteBuilder) SuffixExpr(expr Sqlizer) DeleteBuilder {
+	return builder.Append(b, "Suffixes", expr).(DeleteBuilder)
+}
+
+// Query builds and runs the query with the Runner set by RunWith.
+func (b DeleteBuilder) Query() (*sql.Rows, error) {
+	data := builder.GetStruct(b).(deleteData)
+	return data.Query()
+}
+
+func (d *deleteData) Query() (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return QueryWith(d.RunWith, d)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/delete_ctx.go b/vendor/github.com/Masterminds/squirrel/delete_ctx.go
new file mode 100644
index 000000000000..de83c55df3c6
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/delete_ctx.go
@@ -0,0 +1,69 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lann/builder"
+)
+
+func (d *deleteData) ExecContext(ctx context.Context) (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(ExecerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return ExecContextWith(ctx, ctxRunner, d)
+}
+
+func (d *deleteData) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(QueryerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return QueryContextWith(ctx, ctxRunner, d)
+}
+
+func (d *deleteData) QueryRowContext(ctx context.Context) RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok :=
d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(deleteData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(deleteData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b DeleteBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go new file mode 100644 index 000000000000..eba1b4579e59 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/expr.go @@ -0,0 +1,419 @@ +package squirrel + +import ( + "bytes" + "database/sql/driver" + "fmt" + "reflect" + "sort" + "strings" +) + +const ( + // Portable true/false literals. + sqlTrue = "(1=1)" + sqlFalse = "(1=0)" +) + +type expr struct { + sql string + args []interface{} +} + +// Expr builds an expression from a SQL fragment and arguments. +// +// Ex: +// Expr("FROM_UNIXTIME(?)", t) +func Expr(sql string, args ...interface{}) Sqlizer { + return expr{sql: sql, args: args} +} + +func (e expr) ToSql() (sql string, args []interface{}, err error) { + simple := true + for _, arg := range e.args { + if _, ok := arg.(Sqlizer); ok { + simple = false + } + } + if simple { + return e.sql, e.args, nil + } + + buf := &bytes.Buffer{} + ap := e.args + sp := e.sql + + var isql string + var iargs []interface{} + + for err == nil && len(ap) > 0 && len(sp) > 0 { + i := strings.Index(sp, "?") + if i < 0 { + // no more placeholders + break + } + if len(sp) > i+1 && sp[i+1:i+2] == "?" { + // escaped "??"; append it and step past + buf.WriteString(sp[:i+2]) + sp = sp[i+2:] + continue + } + + if as, ok := ap[0].(Sqlizer); ok { + // sqlizer argument; expand it and append the result + isql, iargs, err = as.ToSql() + buf.WriteString(sp[:i]) + buf.WriteString(isql) + args = append(args, iargs...) + } else { + // normal argument; append it and the placeholder + buf.WriteString(sp[:i+1]) + args = append(args, ap[0]) + } + + // step past the argument and placeholder + ap = ap[1:] + sp = sp[i+1:] + } + + // append the remaining sql and arguments + buf.WriteString(sp) + return buf.String(), append(args, ap...), err +} + +type concatExpr []interface{} + +func (ce concatExpr) ToSql() (sql string, args []interface{}, err error) { + for _, part := range ce { + switch p := part.(type) { + case string: + sql += p + case Sqlizer: + pSql, pArgs, err := p.ToSql() + if err != nil { + return "", nil, err + } + sql += pSql + args = append(args, pArgs...) 
+ default: + return "", nil, fmt.Errorf("%#v is not a string or Sqlizer", part) + } + } + return +} + +// ConcatExpr builds an expression by concatenating strings and other expressions. +// +// Ex: +// name_expr := Expr("CONCAT(?, ' ', ?)", firstName, lastName) +// ConcatExpr("COALESCE(full_name,", name_expr, ")") +func ConcatExpr(parts ...interface{}) concatExpr { + return concatExpr(parts) +} + +// aliasExpr helps to alias part of SQL query generated with underlying "expr" +type aliasExpr struct { + expr Sqlizer + alias string +} + +// Alias allows to define alias for column in SelectBuilder. Useful when column is +// defined as complex expression like IF or CASE +// Ex: +// .Column(Alias(caseStmt, "case_column")) +func Alias(expr Sqlizer, alias string) aliasExpr { + return aliasExpr{expr, alias} +} + +func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) { + sql, args, err = e.expr.ToSql() + if err == nil { + sql = fmt.Sprintf("(%s) AS %s", sql, e.alias) + } + return +} + +// Eq is syntactic sugar for use with Where/Having/Set methods. +type Eq map[string]interface{} + +func (eq Eq) toSQL(useNotOpr bool) (sql string, args []interface{}, err error) { + if len(eq) == 0 { + // Empty Sql{} evaluates to true. + sql = sqlTrue + return + } + + var ( + exprs []string + equalOpr = "=" + inOpr = "IN" + nullOpr = "IS" + inEmptyExpr = sqlFalse + ) + + if useNotOpr { + equalOpr = "<>" + inOpr = "NOT IN" + nullOpr = "IS NOT" + inEmptyExpr = sqlTrue + } + + sortedKeys := getSortedKeys(eq) + for _, key := range sortedKeys { + var expr string + val := eq[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + r := reflect.ValueOf(val) + if r.Kind() == reflect.Ptr { + if r.IsNil() { + val = nil + } else { + val = r.Elem().Interface() + } + } + + if val == nil { + expr = fmt.Sprintf("%s %s NULL", key, nullOpr) + } else { + if isListType(val) { + valVal := reflect.ValueOf(val) + if valVal.Len() == 0 { + expr = inEmptyExpr + if args == nil { + args = []interface{}{} + } + } else { + for i := 0; i < valVal.Len(); i++ { + args = append(args, valVal.Index(i).Interface()) + } + expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) + } + } else { + expr = fmt.Sprintf("%s %s ?", key, equalOpr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (eq Eq) ToSql() (sql string, args []interface{}, err error) { + return eq.toSQL(false) +} + +// NotEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(NotEq{"id": 1}) == "id <> 1" +type NotEq Eq + +func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { + return Eq(neq).toSQL(true) +} + +// Like is syntactic sugar for use with LIKE conditions. 
+// Ex: +// .Where(Like{"name": "%irrel"}) +type Like map[string]interface{} + +func (lk Like) toSql(opr string) (sql string, args []interface{}, err error) { + var exprs []string + for key, val := range lk { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with like operators") + return + } else { + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with like operators") + return + } else { + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lk Like) ToSql() (sql string, args []interface{}, err error) { + return lk.toSql("LIKE") +} + +// NotLike is syntactic sugar for use with LIKE conditions. +// Ex: +// .Where(NotLike{"name": "%irrel"}) +type NotLike Like + +func (nlk NotLike) ToSql() (sql string, args []interface{}, err error) { + return Like(nlk).toSql("NOT LIKE") +} + +// ILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(ILike{"name": "sq%"}) +type ILike Like + +func (ilk ILike) ToSql() (sql string, args []interface{}, err error) { + return Like(ilk).toSql("ILIKE") +} + +// NotILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(NotILike{"name": "sq%"}) +type NotILike Like + +func (nilk NotILike) ToSql() (sql string, args []interface{}, err error) { + return Like(nilk).toSql("NOT ILIKE") +} + +// Lt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Lt{"id": 1}) +type Lt map[string]interface{} + +func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + opr = "<" + ) + + if opposite { + opr = ">" + } + + if orEq { + opr = fmt.Sprintf("%s%s", opr, "=") + } + + sortedKeys := getSortedKeys(lt) + for _, key := range sortedKeys { + var expr string + val := lt[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with less than or greater than operators") + return + } + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with less than or greater than operators") + return + } + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lt Lt) ToSql() (sql string, args []interface{}, err error) { + return lt.toSql(false, false) +} + +// LtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(LtOrEq{"id": 1}) == "id <= 1" +type LtOrEq Lt + +func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(ltOrEq).toSql(false, true) +} + +// Gt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Gt{"id": 1}) == "id > 1" +type Gt Lt + +func (gt Gt) ToSql() (sql string, args []interface{}, err error) { + return Lt(gt).toSql(true, false) +} + +// GtOrEq is syntactic sugar for use with Where/Having/Set methods. 
+// Ex: +// .Where(GtOrEq{"id": 1}) == "id >= 1" +type GtOrEq Lt + +func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(gtOrEq).toSql(true, true) +} + +type conj []Sqlizer + +func (c conj) join(sep, defaultExpr string) (sql string, args []interface{}, err error) { + if len(c) == 0 { + return defaultExpr, []interface{}{}, nil + } + var sqlParts []string + for _, sqlizer := range c { + partSQL, partArgs, err := nestedToSql(sqlizer) + if err != nil { + return "", nil, err + } + if partSQL != "" { + sqlParts = append(sqlParts, partSQL) + args = append(args, partArgs...) + } + } + if len(sqlParts) > 0 { + sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) + } + return +} + +// And conjunction Sqlizers +type And conj + +func (a And) ToSql() (string, []interface{}, error) { + return conj(a).join(" AND ", sqlTrue) +} + +// Or conjunction Sqlizers +type Or conj + +func (o Or) ToSql() (string, []interface{}, error) { + return conj(o).join(" OR ", sqlFalse) +} + +func getSortedKeys(exp map[string]interface{}) []string { + sortedKeys := make([]string, 0, len(exp)) + for k := range exp { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +func isListType(val interface{}) bool { + if driver.IsValue(val) { + return false + } + valVal := reflect.ValueOf(val) + return valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice +} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go new file mode 100644 index 000000000000..c23a57935ba8 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert.go @@ -0,0 +1,298 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "io" + "sort" + "strings" + + "github.com/lann/builder" +) + +type insertData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + StatementKeyword string + Options []string + Into string + Columns []string + Values [][]interface{} + Suffixes []Sqlizer + Select *SelectBuilder +} + +func (d *insertData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *insertData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *insertData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Into) == 0 { + err = errors.New("insert statements must specify a table") + return + } + if len(d.Values) == 0 && d.Select == nil { + err = errors.New("insert statements must have at least one set of values or select clause") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + if d.StatementKeyword == "" { + sql.WriteString("INSERT ") + } else { + sql.WriteString(d.StatementKeyword) + sql.WriteString(" ") + } + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + sql.WriteString("INTO ") + sql.WriteString(d.Into) + sql.WriteString(" ") + + if len(d.Columns) > 0 { + sql.WriteString("(") + 
sql.WriteString(strings.Join(d.Columns, ",")) + sql.WriteString(") ") + } + + if d.Select != nil { + args, err = d.appendSelectToSQL(sql, args) + } else { + args, err = d.appendValuesToSQL(sql, args) + } + if err != nil { + return + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +func (d *insertData) appendValuesToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if len(d.Values) == 0 { + return args, errors.New("values for insert statements are not set") + } + + io.WriteString(w, "VALUES ") + + valuesStrings := make([]string, len(d.Values)) + for r, row := range d.Values { + valueStrings := make([]string, len(row)) + for v, val := range row { + if vs, ok := val.(Sqlizer); ok { + vsql, vargs, err := vs.ToSql() + if err != nil { + return nil, err + } + valueStrings[v] = vsql + args = append(args, vargs...) + } else { + valueStrings[v] = "?" + args = append(args, val) + } + } + valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) + } + + io.WriteString(w, strings.Join(valuesStrings, ",")) + + return args, nil +} + +func (d *insertData) appendSelectToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if d.Select == nil { + return args, errors.New("select clause for insert statements are not set") + } + + selectClause, sArgs, err := d.Select.ToSql() + if err != nil { + return args, err + } + + io.WriteString(w, selectClause) + args = append(args, sArgs...) + + return args, nil +} + +// Builder + +// InsertBuilder builds SQL INSERT statements. +type InsertBuilder builder.Builder + +func init() { + builder.Register(InsertBuilder{}, insertData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { + return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { + return setRunWith(b, runner).(InsertBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b InsertBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b InsertBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b InsertBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b InsertBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(insertData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. 
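+//
+// A minimal usage sketch (editor's example, not upstream godoc; the "users"
+// table and "name" column are hypothetical):
+//
+//	sql, args := Insert("users").Columns("name").Values("moe").MustSql()
+//	// sql == "INSERT INTO users (name) VALUES (?)"
+//	// args == []interface{}{"moe"}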
+func (b InsertBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// Prefix adds an expression to the beginning of the query
+func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder {
+	return b.PrefixExpr(Expr(sql, args...))
+}
+
+// PrefixExpr adds an expression to the very beginning of the query
+func (b InsertBuilder) PrefixExpr(expr Sqlizer) InsertBuilder {
+	return builder.Append(b, "Prefixes", expr).(InsertBuilder)
+}
+
+// Options adds keyword options before the INTO clause of the query.
+func (b InsertBuilder) Options(options ...string) InsertBuilder {
+	return builder.Extend(b, "Options", options).(InsertBuilder)
+}
+
+// Into sets the INTO clause of the query.
+func (b InsertBuilder) Into(from string) InsertBuilder {
+	return builder.Set(b, "Into", from).(InsertBuilder)
+}
+
+// Columns adds insert columns to the query.
+func (b InsertBuilder) Columns(columns ...string) InsertBuilder {
+	return builder.Extend(b, "Columns", columns).(InsertBuilder)
+}
+
+// Values adds a single row's values to the query.
+func (b InsertBuilder) Values(values ...interface{}) InsertBuilder {
+	return builder.Append(b, "Values", values).(InsertBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder {
+	return b.SuffixExpr(Expr(sql, args...))
+}
+
+// SuffixExpr adds an expression to the end of the query
+func (b InsertBuilder) SuffixExpr(expr Sqlizer) InsertBuilder {
+	return builder.Append(b, "Suffixes", expr).(InsertBuilder)
+}
+
+// SetMap sets the columns and values for the insert builder from a map of
+// column names to values. Note that it resets any columns and values that
+// were previously set.
+func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder {
+	// Keep the columns in a consistent order by sorting the column key string.
+	cols := make([]string, 0, len(clauses))
+	for col := range clauses {
+		cols = append(cols, col)
+	}
+	sort.Strings(cols)
+
+	vals := make([]interface{}, 0, len(clauses))
+	for _, col := range cols {
+		vals = append(vals, clauses[col])
+	}
+
+	b = builder.Set(b, "Columns", cols).(InsertBuilder)
+	b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder)
+
+	return b
+}
+
+// Select sets a Select clause for the insert query.
+// If both Values and Select are used, the Select clause takes precedence.
+func (b InsertBuilder) Select(sb SelectBuilder) InsertBuilder {
+	return builder.Set(b, "Select", &sb).(InsertBuilder)
+}
+
+func (b InsertBuilder) statementKeyword(keyword string) InsertBuilder {
+	return builder.Set(b, "StatementKeyword", keyword).(InsertBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/insert_ctx.go b/vendor/github.com/Masterminds/squirrel/insert_ctx.go
new file mode 100644
index 000000000000..4541c2fed325
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/insert_ctx.go
@@ -0,0 +1,69 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lann/builder"
+)
+
+func (d *insertData) ExecContext(ctx context.Context) (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(ExecerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return ExecContextWith(ctx, ctxRunner, d)
+}
+
+func (d *insertData) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(QueryerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return QueryContextWith(ctx, ctxRunner, d)
+}
+
+func (d *insertData) QueryRowContext(ctx context.Context) RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRowerContext)
+	if !ok {
+		if _, ok := d.RunWith.(QueryerContext); !ok {
+			return &Row{err: RunnerNotQueryRunner}
+		}
+		return &Row{err: NoContextSupport}
+	}
+	return QueryRowContextWith(ctx, queryRower, d)
+}
+
+// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
+func (b InsertBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
+	data := builder.GetStruct(b).(insertData)
+	return data.ExecContext(ctx)
+}
+
+// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
+func (b InsertBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	data := builder.GetStruct(b).(insertData)
+	return data.QueryContext(ctx)
+}
+
+// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
+func (b InsertBuilder) QueryRowContext(ctx context.Context) RowScanner {
+	data := builder.GetStruct(b).(insertData)
+	return data.QueryRowContext(ctx)
+}
+
+// ScanContext is a shortcut for QueryRowContext().Scan.
+func (b InsertBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
+	return b.QueryRowContext(ctx).Scan(dest...)
+} diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go new file mode 100644 index 000000000000..c58f68f1a497 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/part.go @@ -0,0 +1,63 @@ +package squirrel + +import ( + "fmt" + "io" +) + +type part struct { + pred interface{} + args []interface{} +} + +func newPart(pred interface{}, args ...interface{}) Sqlizer { + return &part{pred, args} +} + +func (p part) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case Sqlizer: + sql, args, err = nestedToSql(pred) + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string or Sqlizer, not %T", pred) + } + return +} + +func nestedToSql(s Sqlizer) (string, []interface{}, error) { + if raw, ok := s.(rawSqlizer); ok { + return raw.toSqlRaw() + } else { + return s.ToSql() + } +} + +func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) { + for i, p := range parts { + partSql, partArgs, err := nestedToSql(p) + if err != nil { + return nil, err + } else if len(partSql) == 0 { + continue + } + + if i > 0 { + _, err := io.WriteString(w, sep) + if err != nil { + return nil, err + } + } + + _, err = io.WriteString(w, partSql) + if err != nil { + return nil, err + } + args = append(args, partArgs...) + } + return args, nil +} diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go new file mode 100644 index 000000000000..8e97a6c62d4b --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/placeholder.go @@ -0,0 +1,114 @@ +package squirrel + +import ( + "bytes" + "fmt" + "strings" +) + +// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method. +// +// ReplacePlaceholders takes a SQL statement and replaces each question mark +// placeholder with a (possibly different) SQL placeholder. +type PlaceholderFormat interface { + ReplacePlaceholders(sql string) (string, error) +} + +type placeholderDebugger interface { + debugPlaceholder() string +} + +var ( + // Question is a PlaceholderFormat instance that leaves placeholders as + // question marks. + Question = questionFormat{} + + // Dollar is a PlaceholderFormat instance that replaces placeholders with + // dollar-prefixed positional placeholders (e.g. $1, $2, $3). + Dollar = dollarFormat{} + + // Colon is a PlaceholderFormat instance that replaces placeholders with + // colon-prefixed positional placeholders (e.g. :1, :2, :3). + Colon = colonFormat{} + + // AtP is a PlaceholderFormat instance that replaces placeholders with + // "@p"-prefixed positional placeholders (e.g. @p1, @p2, @p3). + AtP = atpFormat{} +) + +type questionFormat struct{} + +func (questionFormat) ReplacePlaceholders(sql string) (string, error) { + return sql, nil +} + +func (questionFormat) debugPlaceholder() string { + return "?" 
+} + +type dollarFormat struct{} + +func (dollarFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "$") +} + +func (dollarFormat) debugPlaceholder() string { + return "$" +} + +type colonFormat struct{} + +func (colonFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, ":") +} + +func (colonFormat) debugPlaceholder() string { + return ":" +} + +type atpFormat struct{} + +func (atpFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "@p") +} + +func (atpFormat) debugPlaceholder() string { + return "@p" +} + +// Placeholders returns a string with count ? placeholders joined with commas. +func Placeholders(count int) string { + if count < 1 { + return "" + } + + return strings.Repeat(",?", count)[1:] +} + +func replacePositionalPlaceholders(sql, prefix string) (string, error) { + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + i++ + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "%s%d", prefix, i) + sql = sql[p+1:] + } + } + + buf.WriteString(sql) + return buf.String(), nil +} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go new file mode 100644 index 000000000000..74ffda92bde9 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/row.go @@ -0,0 +1,22 @@ +package squirrel + +// RowScanner is the interface that wraps the Scan method. +// +// Scan behaves like database/sql.Row.Scan. +type RowScanner interface { + Scan(...interface{}) error +} + +// Row wraps database/sql.Row to let squirrel return new errors on Scan. +type Row struct { + RowScanner + err error +} + +// Scan returns Row.err or calls RowScanner.Scan. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + return r.RowScanner.Scan(dest...) 
+} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go new file mode 100644 index 000000000000..d55ce4c74079 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select.go @@ -0,0 +1,403 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type selectData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + Options []string + Columns []Sqlizer + From Sqlizer + Joins []Sqlizer + WhereParts []Sqlizer + GroupBys []string + HavingParts []Sqlizer + OrderByParts []Sqlizer + Limit string + Offset string + Suffixes []Sqlizer +} + +func (d *selectData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *selectData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *selectData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { + sqlStr, args, err = d.toSqlRaw() + if err != nil { + return + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sqlStr) + return +} + +func (d *selectData) toSqlRaw() (sqlStr string, args []interface{}, err error) { + if len(d.Columns) == 0 { + err = fmt.Errorf("select statements must have at least one result column") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + sql.WriteString("SELECT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + if len(d.Columns) > 0 { + args, err = appendToSql(d.Columns, sql, ", ", args) + if err != nil { + return + } + } + + if d.From != nil { + sql.WriteString(" FROM ") + args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) + if err != nil { + return + } + } + + if len(d.Joins) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Joins, sql, " ", args) + if err != nil { + return + } + } + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.GroupBys) > 0 { + sql.WriteString(" GROUP BY ") + sql.WriteString(strings.Join(d.GroupBys, ", ")) + } + + if len(d.HavingParts) > 0 { + sql.WriteString(" HAVING ") + args, err = appendToSql(d.HavingParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderByParts) > 0 { + sql.WriteString(" ORDER BY ") + args, err = appendToSql(d.OrderByParts, sql, ", ", args) + if err != nil { + return + } + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr = sql.String() + return +} + +// Builder + +// SelectBuilder builds SQL SELECT statements. 
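+//
+// A minimal usage sketch (editor's example, not upstream godoc; table and
+// column names are hypothetical):
+//
+//	sql, args, err := Select("id", "name").
+//		From("users").
+//		Where(Eq{"deleted": false}).
+//		OrderBy("id DESC").
+//		Limit(10).
+//		ToSql()
+//	// sql == "SELECT id, name FROM users WHERE deleted = ? ORDER BY id DESC LIMIT 10"
+//	// args == []interface{}{false}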
+type SelectBuilder builder.Builder
+
+func init() {
+	builder.Register(SelectBuilder{}, selectData{})
+}
+
+// Format methods
+
+// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
+// query.
+func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder {
+	return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder)
+}
+
+// Runner methods
+
+// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
+// For most cases runner will be a database connection.
+//
+// Internally we use this to mock out the database connection for testing.
+func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder {
+	return setRunWith(b, runner).(SelectBuilder)
+}
+
+// Exec builds and Execs the query with the Runner set by RunWith.
+func (b SelectBuilder) Exec() (sql.Result, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.Exec()
+}
+
+// Query builds and Querys the query with the Runner set by RunWith.
+func (b SelectBuilder) Query() (*sql.Rows, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.Query()
+}
+
+// QueryRow builds and QueryRows the query with the Runner set by RunWith.
+func (b SelectBuilder) QueryRow() RowScanner {
+	data := builder.GetStruct(b).(selectData)
+	return data.QueryRow()
+}
+
+// Scan is a shortcut for QueryRow().Scan.
+func (b SelectBuilder) Scan(dest ...interface{}) error {
+	return b.QueryRow().Scan(dest...)
+}
+
+// SQL methods
+
+// ToSql builds the query into a SQL string and bound args.
+func (b SelectBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.ToSql()
+}
+
+func (b SelectBuilder) toSqlRaw() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.toSqlRaw()
+}
+
+// MustSql builds the query into a SQL string and bound args.
+// It panics if there are any errors.
+func (b SelectBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// Prefix adds an expression to the beginning of the query
+func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder {
+	return b.PrefixExpr(Expr(sql, args...))
+}
+
+// PrefixExpr adds an expression to the very beginning of the query
+func (b SelectBuilder) PrefixExpr(expr Sqlizer) SelectBuilder {
+	return builder.Append(b, "Prefixes", expr).(SelectBuilder)
+}
+
+// Distinct adds a DISTINCT clause to the query.
+func (b SelectBuilder) Distinct() SelectBuilder {
+	return b.Options("DISTINCT")
+}
+
+// Options adds select option to the query
+func (b SelectBuilder) Options(options ...string) SelectBuilder {
+	return builder.Extend(b, "Options", options).(SelectBuilder)
+}
+
+// Columns adds result columns to the query.
+func (b SelectBuilder) Columns(columns ...string) SelectBuilder {
+	parts := make([]interface{}, 0, len(columns))
+	for _, str := range columns {
+		parts = append(parts, newPart(str))
+	}
+	return builder.Extend(b, "Columns", parts).(SelectBuilder)
+}
+
+// RemoveColumns removes all columns from the query.
+// A new column must then be added with the Column or Columns methods;
+// otherwise ToSql will return an error.
+func (b SelectBuilder) RemoveColumns() SelectBuilder {
+	return builder.Delete(b, "Columns").(SelectBuilder)
+}
+
+// Column adds a result column to the query.
+// Unlike Columns, Column accepts args which will be bound to placeholders in
+// the columns string, for example:
+//     Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3)
+func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder)
+}
+
+// From sets the FROM clause of the query.
+func (b SelectBuilder) From(from string) SelectBuilder {
+	return builder.Set(b, "From", newPart(from)).(SelectBuilder)
+}
+
+// FromSelect sets a subquery into the FROM clause of the query.
+func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder {
+	// Prevent misnumbered parameters in nested selects (#183).
+	from = from.PlaceholderFormat(Question)
+	return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder)
+}
+
+// JoinClause adds a join clause to the query.
+func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder)
+}
+
+// Join adds a JOIN clause to the query.
+func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("JOIN "+join, rest...)
+}
+
+// LeftJoin adds a LEFT JOIN clause to the query.
+func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("LEFT JOIN "+join, rest...)
+}
+
+// RightJoin adds a RIGHT JOIN clause to the query.
+func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("RIGHT JOIN "+join, rest...)
+}
+
+// InnerJoin adds an INNER JOIN clause to the query.
+func (b SelectBuilder) InnerJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("INNER JOIN "+join, rest...)
+}
+
+// CrossJoin adds a CROSS JOIN clause to the query.
+func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("CROSS JOIN "+join, rest...)
+}
+
+// Where adds an expression to the WHERE clause of the query.
+//
+// Expressions are ANDed together in the generated SQL.
+//
+// Where accepts several types for its pred argument:
+//
+// nil OR "" - ignored.
+//
+// string - SQL expression.
+// If the expression has SQL placeholders then a set of arguments must be passed
+// as well, one for each placeholder.
+//
+// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is
+// transformed into an expression like "<key> = ?", with the corresponding value
+// bound to the placeholder. If the value is nil, the expression will be
+// "<key> IS NULL". If the value is an array or slice, the expression will be
+// "<key> IN (?,?,...)", with one placeholder for each item in the value. These
+// expressions are ANDed together.
+//
+// Where will panic if pred isn't any of the above types.
+func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder {
+	if pred == nil || pred == "" {
+		return b
+	}
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder)
+}
+
+// GroupBy adds GROUP BY expressions to the query.
+func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder {
+	return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder)
+}
+
+// Having adds an expression to the HAVING clause of the query.
+//
+// See Where.
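+//
+// e.g. (editor's sketch; table and column names are hypothetical):
+//
+//	Select("dept", "COUNT(*)").From("emp").GroupBy("dept").Having("COUNT(*) > ?", 10)
+//
+// renders "SELECT dept, COUNT(*) FROM emp GROUP BY dept HAVING COUNT(*) > ?"
+// with 10 bound to the placeholder.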
+func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder {
+	return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder)
+}
+
+// OrderByClause adds an ORDER BY clause to the query.
+func (b SelectBuilder) OrderByClause(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "OrderByParts", newPart(pred, args...)).(SelectBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder {
+	for _, orderBy := range orderBys {
+		b = b.OrderByClause(orderBy)
+	}
+
+	return b
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b SelectBuilder) Limit(limit uint64) SelectBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder)
+}
+
+// RemoveLimit removes the LIMIT clause, the equivalent of LIMIT ALL, allowing
+// access to all matching records.
+func (b SelectBuilder) RemoveLimit() SelectBuilder {
+	return builder.Delete(b, "Limit").(SelectBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
+func (b SelectBuilder) Offset(offset uint64) SelectBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder)
+}
+
+// RemoveOffset removes the OFFSET clause.
+func (b SelectBuilder) RemoveOffset() SelectBuilder {
+	return builder.Delete(b, "Offset").(SelectBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder {
+	return b.SuffixExpr(Expr(sql, args...))
+}
+
+// SuffixExpr adds an expression to the end of the query
+func (b SelectBuilder) SuffixExpr(expr Sqlizer) SelectBuilder {
+	return builder.Append(b, "Suffixes", expr).(SelectBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/select_ctx.go b/vendor/github.com/Masterminds/squirrel/select_ctx.go
new file mode 100644
index 000000000000..4c42c13f4789
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/select_ctx.go
@@ -0,0 +1,69 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lann/builder"
+)
+
+func (d *selectData) ExecContext(ctx context.Context) (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(ExecerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return ExecContextWith(ctx, ctxRunner, d)
+}
+
+func (d *selectData) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(QueryerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return QueryContextWith(ctx, ctxRunner, d)
+}
+
+func (d *selectData) QueryRowContext(ctx context.Context) RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRowerContext)
+	if !ok {
+		if _, ok := d.RunWith.(QueryerContext); !ok {
+			return &Row{err: RunnerNotQueryRunner}
+		}
+		return &Row{err: NoContextSupport}
+	}
+	return QueryRowContextWith(ctx, queryRower, d)
+}
+
+// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
+func (b SelectBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.ExecContext(ctx)
+}
+
+// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
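+//
+// e.g. (editor's sketch; db is assumed to be a *sql.DB and ctx a context.Context):
+//
+//	rows, err := Select("id").From("users").RunWith(db).QueryContext(ctx)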
+func (b SelectBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b SelectBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go new file mode 100644 index 000000000000..46d456eb620c --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel.go @@ -0,0 +1,183 @@ +// Package squirrel provides a fluent SQL generator. +// +// See https://github.com/Masterminds/squirrel for examples. +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +// Sqlizer is the interface that wraps the ToSql method. +// +// ToSql returns a SQL representation of the Sqlizer, along with a slice of args +// as passed to e.g. database/sql.Exec. It can also return an error. +type Sqlizer interface { + ToSql() (string, []interface{}, error) +} + +// rawSqlizer is expected to do what Sqlizer does, but without finalizing placeholders. +// This is useful for nested queries. +type rawSqlizer interface { + toSqlRaw() (string, []interface{}, error) +} + +// Execer is the interface that wraps the Exec method. +// +// Exec executes the given query as implemented by database/sql.Exec. +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Queryer is the interface that wraps the Query method. +// +// Query executes the given query as implemented by database/sql.Query. +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRower is the interface that wraps the QueryRow method. +// +// QueryRow executes the given query as implemented by database/sql.QueryRow. +type QueryRower interface { + QueryRow(query string, args ...interface{}) RowScanner +} + +// BaseRunner groups the Execer and Queryer interfaces. +type BaseRunner interface { + Execer + Queryer +} + +// Runner groups the Execer, Queryer, and QueryRower interfaces. +type Runner interface { + Execer + Queryer + QueryRower +} + +// WrapStdSql wraps a type implementing the standard SQL interface with methods that +// squirrel expects. +func WrapStdSql(stdSql StdSql) Runner { + return &stdsqlRunner{stdSql} +} + +// StdSql encompasses the standard methods of the *sql.DB type, and other types that +// wrap these methods. +type StdSql interface { + Query(string, ...interface{}) (*sql.Rows, error) + QueryRow(string, ...interface{}) *sql.Row + Exec(string, ...interface{}) (sql.Result, error) +} + +type stdsqlRunner struct { + StdSql +} + +func (r *stdsqlRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.StdSql.QueryRow(query, args...) +} + +func setRunWith(b interface{}, runner BaseRunner) interface{} { + switch r := runner.(type) { + case StdSqlCtx: + runner = WrapStdSqlCtx(r) + case StdSql: + runner = WrapStdSql(r) + } + return builder.Set(b, "RunWith", runner) +} + +// RunnerNotSet is returned by methods that need a Runner if it isn't set. 
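+// For example (editor's note), calling Select("1").Exec() without a prior
+// RunWith returns this error.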
+var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)")
+
+// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower.
+var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower")
+
+// ExecWith Execs the SQL returned by s with db.
+func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) {
+	query, args, err := s.ToSql()
+	if err != nil {
+		return
+	}
+	return db.Exec(query, args...)
+}
+
+// QueryWith Querys the SQL returned by s with db.
+func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) {
+	query, args, err := s.ToSql()
+	if err != nil {
+		return
+	}
+	return db.Query(query, args...)
+}
+
+// QueryRowWith QueryRows the SQL returned by s with db.
+func QueryRowWith(db QueryRower, s Sqlizer) RowScanner {
+	query, args, err := s.ToSql()
+	return &Row{RowScanner: db.QueryRow(query, args...), err: err}
+}
+
+// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed
+//
+// If ToSql returns an error, the result of this method will look like:
+// "[ToSql error: %s]" or "[DebugSqlizer error: %s]"
+//
+// IMPORTANT: As its name suggests, this function should only be used for
+// debugging. While the string result *might* be valid SQL, this function does
+// not try very hard to ensure it. Additionally, executing the output of this
+// function with any untrusted user input is certainly insecure.
+func DebugSqlizer(s Sqlizer) string {
+	sql, args, err := s.ToSql()
+	if err != nil {
+		return fmt.Sprintf("[ToSql error: %s]", err)
+	}
+
+	var placeholder string
+	downCast, ok := s.(placeholderDebugger)
+	if !ok {
+		placeholder = "?"
+	} else {
+		placeholder = downCast.debugPlaceholder()
+	}
+	// TODO: dedupe this with placeholder.go
+	buf := &bytes.Buffer{}
+	i := 0
+	for {
+		p := strings.Index(sql, placeholder)
+		if p == -1 {
+			break
+		}
+		if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ?
+			buf.WriteString(sql[:p])
+			buf.WriteString("?")
+			if len(sql[p:]) == 1 {
+				break
+			}
+			sql = sql[p+2:]
+		} else {
+			if i+1 > len(args) {
+				return fmt.Sprintf(
+					"[DebugSqlizer error: too many placeholders in %#v for %d args]",
+					sql, len(args))
+			}
+			buf.WriteString(sql[:p])
+			fmt.Fprintf(buf, "'%v'", args[i])
+			// advance our sql string "cursor" beyond the arg we placed
+			sql = sql[p+1:]
+			i++
+		}
+	}
+	if i < len(args) {
+		return fmt.Sprintf(
+			"[DebugSqlizer error: not enough placeholders in %#v for %d args]",
+			sql, len(args))
+	}
+	// "append" any remaining sql that won't need interpolating
+	buf.WriteString(sql)
+	return buf.String()
+}
diff --git a/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go
new file mode 100644
index 000000000000..c20148ad33c7
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go
@@ -0,0 +1,93 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+)
+
+// NoContextSupport is returned if a db doesn't support Context.
+var NoContextSupport = errors.New("DB does not support Context")
+
+// ExecerContext is the interface that wraps the ExecContext method.
+//
+// ExecContext executes the given query as implemented by database/sql.ExecContext.
+type ExecerContext interface {
+	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+}
+
+// QueryerContext is the interface that wraps the QueryContext method.
+// +// QueryContext executes the given query as implemented by database/sql.QueryContext. +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRowerContext is the interface that wraps the QueryRowContext method. +// +// QueryRowContext executes the given query as implemented by database/sql.QueryRowContext. +type QueryRowerContext interface { + QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner +} + +// RunnerContext groups the Runner interface, along with the Context versions of each of +// its methods +type RunnerContext interface { + Runner + QueryerContext + QueryRowerContext + ExecerContext +} + +// WrapStdSqlCtx wraps a type implementing the standard SQL interface plus the context +// versions of the methods with methods that squirrel expects. +func WrapStdSqlCtx(stdSqlCtx StdSqlCtx) RunnerContext { + return &stdsqlCtxRunner{stdSqlCtx} +} + +// StdSqlCtx encompasses the standard methods of the *sql.DB type, along with the Context +// versions of those methods, and other types that wrap these methods. +type StdSqlCtx interface { + StdSql + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) +} + +type stdsqlCtxRunner struct { + StdSqlCtx +} + +func (r *stdsqlCtxRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.StdSqlCtx.QueryRow(query, args...) +} + +func (r *stdsqlCtxRunner) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner { + return r.StdSqlCtx.QueryRowContext(ctx, query, args...) +} + +// ExecContextWith ExecContexts the SQL returned by s with db. +func ExecContextWith(ctx context.Context, db ExecerContext, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.ExecContext(ctx, query, args...) +} + +// QueryContextWith QueryContexts the SQL returned by s with db. +func QueryContextWith(ctx context.Context, db QueryerContext, s Sqlizer) (rows *sql.Rows, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.QueryContext(ctx, query, args...) +} + +// QueryRowContextWith QueryRowContexts the SQL returned by s with db. +func QueryRowContextWith(ctx context.Context, db QueryRowerContext, s Sqlizer) RowScanner { + query, args, err := s.ToSql() + return &Row{RowScanner: db.QueryRowContext(ctx, query, args...), err: err} +} diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go new file mode 100644 index 000000000000..9420c67f8e92 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/statement.go @@ -0,0 +1,104 @@ +package squirrel + +import "github.com/lann/builder" + +// StatementBuilderType is the type of StatementBuilder. +type StatementBuilderType builder.Builder + +// Select returns a SelectBuilder for this StatementBuilderType. +func (b StatementBuilderType) Select(columns ...string) SelectBuilder { + return SelectBuilder(b).Columns(columns...) +} + +// Insert returns a InsertBuilder for this StatementBuilderType. +func (b StatementBuilderType) Insert(into string) InsertBuilder { + return InsertBuilder(b).Into(into) +} + +// Replace returns a InsertBuilder for this StatementBuilderType with the +// statement keyword set to "REPLACE". 
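+//
+// e.g. (editor's sketch): Replace("users").Columns("id").Values(1) renders
+// "REPLACE INTO users (id) VALUES (?)".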
+func (b StatementBuilderType) Replace(into string) InsertBuilder {
+	return InsertBuilder(b).statementKeyword("REPLACE").Into(into)
+}
+
+// Update returns a UpdateBuilder for this StatementBuilderType.
+func (b StatementBuilderType) Update(table string) UpdateBuilder {
+	return UpdateBuilder(b).Table(table)
+}
+
+// Delete returns a DeleteBuilder for this StatementBuilderType.
+func (b StatementBuilderType) Delete(from string) DeleteBuilder {
+	return DeleteBuilder(b).From(from)
+}
+
+// PlaceholderFormat sets the PlaceholderFormat field for any child builders.
+func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType {
+	return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType)
+}
+
+// RunWith sets the RunWith field for any child builders.
+func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType {
+	return setRunWith(b, runner).(StatementBuilderType)
+}
+
+// Where adds WHERE expressions to the query.
+//
+// See SelectBuilder.Where for more information.
+func (b StatementBuilderType) Where(pred interface{}, args ...interface{}) StatementBuilderType {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(StatementBuilderType)
+}
+
+// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder.
+var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question)
+
+// Select returns a new SelectBuilder, optionally setting some result columns.
+//
+// See SelectBuilder.Columns.
+func Select(columns ...string) SelectBuilder {
+	return StatementBuilder.Select(columns...)
+}
+
+// Insert returns a new InsertBuilder with the given table name.
+//
+// See InsertBuilder.Into.
+func Insert(into string) InsertBuilder {
+	return StatementBuilder.Insert(into)
+}
+
+// Replace returns a new InsertBuilder with the statement keyword set to
+// "REPLACE" and with the given table name.
+//
+// See InsertBuilder.Into.
+func Replace(into string) InsertBuilder {
+	return StatementBuilder.Replace(into)
+}
+
+// Update returns a new UpdateBuilder with the given table name.
+//
+// See UpdateBuilder.Table.
+func Update(table string) UpdateBuilder {
+	return StatementBuilder.Update(table)
+}
+
+// Delete returns a new DeleteBuilder with the given table name.
+//
+// See DeleteBuilder.Table.
+func Delete(from string) DeleteBuilder {
+	return StatementBuilder.Delete(from)
+}
+
+// Case returns a new CaseBuilder.
+// "what" represents the case value.
+func Case(what ...interface{}) CaseBuilder {
+	b := CaseBuilder(builder.EmptyBuilder)
+
+	switch len(what) {
+	case 0:
+	case 1:
+		b = b.what(what[0])
+	default:
+		b = b.what(newPart(what[0], what[1:]...))
+	}
+	return b
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
new file mode 100644
index 000000000000..5bf267a13642
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
@@ -0,0 +1,121 @@
+package squirrel
+
+import (
+	"database/sql"
+	"fmt"
+	"sync"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+type Preparer interface {
+	Prepare(query string) (*sql.Stmt, error)
+}
+
+// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces.
+type DBProxy interface {
+	Execer
+	Queryer
+	QueryRower
+	Preparer
+}
+
+// NOTE: NewStmtCache is defined in stmtcacher_ctx.go (Go >= 1.8) or stmtcacher_noctx.go (Go < 1.8).
+
+// StmtCache wraps and delegates down to a Preparer type.
+//
+// It also automatically prepares all statements sent to the underlying Preparer
+// for Exec, Query and QueryRow calls, and caches the returned *sql.Stmt using
+// the provided query as the key, so that it can be automatically re-used.
+type StmtCache struct {
+	prep  Preparer
+	cache map[string]*sql.Stmt
+	mu    sync.Mutex
+}
+
+// Prepare delegates down to the underlying Preparer and caches the result
+// using the provided query as a key
+func (sc *StmtCache) Prepare(query string) (*sql.Stmt, error) {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+
+	stmt, ok := sc.cache[query]
+	if ok {
+		return stmt, nil
+	}
+	stmt, err := sc.prep.Prepare(query)
+	if err == nil {
+		sc.cache[query] = stmt
+	}
+	return stmt, err
+}
+
+// Exec delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) Exec(query string, args ...interface{}) (res sql.Result, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Exec(args...)
+}
+
+// Query delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Query(args...)
+}
+
+// QueryRow delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) QueryRow(query string, args ...interface{}) RowScanner {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return stmt.QueryRow(args...)
+}
+
+// Clear removes and closes all the currently cached prepared statements
+func (sc *StmtCache) Clear() (err error) {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+
+	for key, stmt := range sc.cache {
+		delete(sc.cache, key)
+
+		if stmt == nil {
+			continue
+		}
+
+		if cerr := stmt.Close(); cerr != nil {
+			err = cerr
+		}
+	}
+
+	if err != nil {
+		return fmt.Errorf("one or more Stmt.Close failed; last error: %v", err)
+	}
+
+	return
+}
+
+// DBProxyBeginner groups the DBProxy interface with the ability to begin transactions.
+type DBProxyBeginner interface {
+	DBProxy
+	Begin() (*sql.Tx, error)
+}
+
+type stmtCacheProxy struct {
+	DBProxy
+	db *sql.DB
+}
+
+// NewStmtCacheProxy returns a DBProxyBeginner wrapping db that caches prepared
+// statements and can begin transactions.
+func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner {
+	return &stmtCacheProxy{DBProxy: NewStmtCache(db), db: db}
+}
+
+func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) {
+	return sp.db.Begin()
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go
new file mode 100644
index 000000000000..53603cf4c954
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go
@@ -0,0 +1,86 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+)
+
+// PreparerContext is the interface that wraps the Prepare and PrepareContext methods.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+// PrepareContext executes the given query as implemented by database/sql.PrepareContext.
+type PreparerContext interface {
+	Preparer
+	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
+}
+
+// DBProxyContext groups the Execer, Queryer, QueryRower and PreparerContext interfaces.
+type DBProxyContext interface {
+	Execer
+	Queryer
+	QueryRower
+	PreparerContext
+}
+
+// NewStmtCache returns a *StmtCache wrapping a PreparerContext that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
+func NewStmtCache(prep PreparerContext) *StmtCache {
+	return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+// NewStmtCacher is deprecated.
+//
+// Deprecated: Use NewStmtCache instead.
+func NewStmtCacher(prep PreparerContext) DBProxyContext {
+	return NewStmtCache(prep)
+}
+
+// PrepareContext delegates down to the underlying PreparerContext and caches the result
+// using the provided query as a key
+func (sc *StmtCache) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
+	ctxPrep, ok := sc.prep.(PreparerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+	stmt, ok := sc.cache[query]
+	if ok {
+		return stmt, nil
+	}
+	stmt, err := ctxPrep.PrepareContext(ctx, query)
+	if err == nil {
+		sc.cache[query] = stmt
+	}
+	return stmt, err
+}
+
+// ExecContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) ExecContext(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return
+	}
+	return stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) QueryContext(ctx context.Context, query string, args ...interface{}) (rows *sql.Rows, err error) {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return
+	}
+	return stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return stmt.QueryRowContext(ctx, args...)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go
new file mode 100644
index 000000000000..deac96777b7f
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go
@@ -0,0 +1,21 @@
+// +build !go1.8
+
+package squirrel
+
+import (
+	"database/sql"
+)
+
+// NewStmtCache returns a *StmtCache wrapping a Preparer that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
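+//
+// A usage sketch (editor's example; db is assumed to be a *sql.DB):
+//
+//	cache := NewStmtCache(db)
+//	rows, err := Select("id").From("users").RunWith(cache).Query()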
+func NewStmtCache(prep Preparer) *StmtCache {
+	return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+// NewStmtCacher is deprecated.
+//
+// Deprecated: Use NewStmtCache instead.
+func NewStmtCacher(prep Preparer) DBProxy {
+	return NewStmtCache(prep)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go
new file mode 100644
index 000000000000..eb2a9c4dd9a0
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/update.go
@@ -0,0 +1,288 @@
+package squirrel
+
+import (
+	"bytes"
+	"database/sql"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/lann/builder"
+)
+
+type updateData struct {
+	PlaceholderFormat PlaceholderFormat
+	RunWith           BaseRunner
+	Prefixes          []Sqlizer
+	Table             string
+	SetClauses        []setClause
+	From              Sqlizer
+	WhereParts        []Sqlizer
+	OrderBys          []string
+	Limit             string
+	Offset            string
+	Suffixes          []Sqlizer
+}
+
+type setClause struct {
+	column string
+	value  interface{}
+}
+
+func (d *updateData) Exec() (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return ExecWith(d.RunWith, d)
+}
+
+func (d *updateData) Query() (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return QueryWith(d.RunWith, d)
+}
+
+func (d *updateData) QueryRow() RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRower)
+	if !ok {
+		return &Row{err: RunnerNotQueryRunner}
+	}
+	return QueryRowWith(queryRower, d)
+}
+
+func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.Table) == 0 {
+		err = fmt.Errorf("update statements must specify a table")
+		return
+	}
+	if len(d.SetClauses) == 0 {
+		err = fmt.Errorf("update statements must have at least one Set clause")
+		return
+	}
+
+	sql := &bytes.Buffer{}
+
+	if len(d.Prefixes) > 0 {
+		args, err = appendToSql(d.Prefixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+
+		sql.WriteString(" ")
+	}
+
+	sql.WriteString("UPDATE ")
+	sql.WriteString(d.Table)
+
+	sql.WriteString(" SET ")
+	setSqls := make([]string, len(d.SetClauses))
+	for i, setClause := range d.SetClauses {
+		var valSql string
+		if vs, ok := setClause.value.(Sqlizer); ok {
+			vsql, vargs, err := vs.ToSql()
+			if err != nil {
+				return "", nil, err
+			}
+			if _, ok := vs.(SelectBuilder); ok {
+				valSql = fmt.Sprintf("(%s)", vsql)
+			} else {
+				valSql = vsql
+			}
+			args = append(args, vargs...)
+		} else {
+			valSql = "?"
+			args = append(args, setClause.value)
+		}
+		setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql)
+	}
+	sql.WriteString(strings.Join(setSqls, ", "))
+
+	if d.From != nil {
+		sql.WriteString(" FROM ")
+		args, err = appendToSql([]Sqlizer{d.From}, sql, "", args)
+		if err != nil {
+			return
+		}
+	}
+
+	if len(d.WhereParts) > 0 {
+		sql.WriteString(" WHERE ")
+		args, err = appendToSql(d.WhereParts, sql, " AND ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	if len(d.OrderBys) > 0 {
+		sql.WriteString(" ORDER BY ")
+		sql.WriteString(strings.Join(d.OrderBys, ", "))
+	}
+
+	if len(d.Limit) > 0 {
+		sql.WriteString(" LIMIT ")
+		sql.WriteString(d.Limit)
+	}
+
+	if len(d.Offset) > 0 {
+		sql.WriteString(" OFFSET ")
+		sql.WriteString(d.Offset)
+	}
+
+	if len(d.Suffixes) > 0 {
+		sql.WriteString(" ")
+		args, err = appendToSql(d.Suffixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
+	return
+}
+
+// Builder
+
+// UpdateBuilder builds SQL UPDATE statements.
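+//
+// A minimal usage sketch (editor's example; table and column names are
+// hypothetical):
+//
+//	sql, args, err := Update("users").
+//		Set("name", "moe").
+//		Where(Eq{"id": 1}).
+//		ToSql()
+//	// sql == "UPDATE users SET name = ? WHERE id = ?"
+//	// args == []interface{}{"moe", 1}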
+type UpdateBuilder builder.Builder
+
+func init() {
+	builder.Register(UpdateBuilder{}, updateData{})
+}
+
+// Format methods
+
+// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
+// query.
+func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder {
+	return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder)
+}
+
+// Runner methods
+
+// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
+func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder {
+	return setRunWith(b, runner).(UpdateBuilder)
+}
+
+// Exec builds and Execs the query with the Runner set by RunWith.
+func (b UpdateBuilder) Exec() (sql.Result, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.Exec()
+}
+
+// Query builds and Querys the query with the Runner set by RunWith.
+func (b UpdateBuilder) Query() (*sql.Rows, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.Query()
+}
+
+// QueryRow builds and QueryRows the query with the Runner set by RunWith.
+func (b UpdateBuilder) QueryRow() RowScanner {
+	data := builder.GetStruct(b).(updateData)
+	return data.QueryRow()
+}
+
+// Scan is a shortcut for QueryRow().Scan.
+func (b UpdateBuilder) Scan(dest ...interface{}) error {
+	return b.QueryRow().Scan(dest...)
+}
+
+// SQL methods
+
+// ToSql builds the query into a SQL string and bound args.
+func (b UpdateBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.ToSql()
+}
+
+// MustSql builds the query into a SQL string and bound args.
+// It panics if there are any errors.
+func (b UpdateBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// Prefix adds an expression to the beginning of the query
+func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder {
+	return b.PrefixExpr(Expr(sql, args...))
+}
+
+// PrefixExpr adds an expression to the very beginning of the query
+func (b UpdateBuilder) PrefixExpr(expr Sqlizer) UpdateBuilder {
+	return builder.Append(b, "Prefixes", expr).(UpdateBuilder)
+}
+
+// Table sets the table to be updated.
+func (b UpdateBuilder) Table(table string) UpdateBuilder {
+	return builder.Set(b, "Table", table).(UpdateBuilder)
+}
+
+// Set adds SET clauses to the query.
+func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder {
+	return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder)
+}
+
+// SetMap is a convenience method which calls .Set for each key/value pair in clauses.
+func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder {
+	keys := make([]string, len(clauses))
+	i := 0
+	for key := range clauses {
+		keys[i] = key
+		i++
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		val := clauses[key]
+		b = b.Set(key, val)
+	}
+	return b
+}
+
+// From adds a FROM clause to the query.
+// UPDATE ... FROM is a valid construct in PostgreSQL only.
+func (b UpdateBuilder) From(from string) UpdateBuilder {
+	return builder.Set(b, "From", newPart(from)).(UpdateBuilder)
+}
+
+// FromSelect sets a subquery into the FROM clause of the query.
+func (b UpdateBuilder) FromSelect(from SelectBuilder, alias string) UpdateBuilder {
+	// Prevent misnumbered parameters in nested selects (#183).
+	from = from.PlaceholderFormat(Question)
+	return builder.Set(b, "From", Alias(from, alias)).(UpdateBuilder)
+}
+
+// Where adds WHERE expressions to the query.
+//
+// See SelectBuilder.Where for more information.
+func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder { + return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder) +} + +// Offset sets a OFFSET clause on the query. +func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder) +} + +// Suffix adds an expression to the end of the query +func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b UpdateBuilder) SuffixExpr(expr Sqlizer) UpdateBuilder { + return builder.Append(b, "Suffixes", expr).(UpdateBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/update_ctx.go b/vendor/github.com/Masterminds/squirrel/update_ctx.go new file mode 100644 index 000000000000..ad479f96f49f --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/update_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *updateData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *updateData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *updateData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(updateData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(updateData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(updateData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b UpdateBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) 
+} diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go new file mode 100644 index 000000000000..976b63ace4ec --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/where.go @@ -0,0 +1,30 @@ +package squirrel + +import ( + "fmt" +) + +type wherePart part + +func newWherePart(pred interface{}, args ...interface{}) Sqlizer { + return &wherePart{pred: pred, args: args} +} + +func (p wherePart) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case rawSqlizer: + return pred.toSqlRaw() + case Sqlizer: + return pred.ToSql() + case map[string]interface{}: + return Eq(pred).ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string-keyed map or string, not %T", pred) + } + return +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 000000000000..584149b6ee28 --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000000..8915f02773f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go new file mode 100644 index 000000000000..23ddfab1a6bf --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -0,0 +1,323 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "sync" + + "github.com/containerd/log" + "github.com/klauspost/compress/zstd" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Gzip is gzip compression algorithm. + Gzip + // Zstd is zstd compression algorithm. + Zstd +) + +const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" + +var ( + initPigz sync.Once + unpigzPath string +) + +var ( + bufioReader32KPool = &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, + } +) + +// DecompressReadCloser include the stream after decompress and the compress method detected. +type DecompressReadCloser interface { + io.ReadCloser + // GetCompression returns the compress method which is used before decompressing + GetCompression() Compression +} + +type readCloserWrapper struct { + io.Reader + compression Compression + closer func() error +} + +func (r *readCloserWrapper) Close() error { + if r.closer != nil { + return r.closer() + } + return nil +} + +func (r *readCloserWrapper) GetCompression() Compression { + return r.compression +} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (w *writeCloserWrapper) Close() error { + if w.closer != nil { + w.closer() + } + return nil +} + +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) +} + +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + gzipMagic = []byte{0x1F, 0x8B, 0x08} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. +func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. 
+ if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, fn := range map[Compression]matcher{ + Gzip: magicNumberMatcher(gzipMagic), + Zstd: zstdMatcher(), + } { + if fn(source) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { + buf := newBufferedReader(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue docker/docker#18170 + return nil, err + } + + switch compression := DetectCompression(bs); compression { + case Uncompressed: + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + gzReader, err := gzipDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + return &readCloserWrapper{ + Reader: zstdReader, + compression: compression, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil + + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + case Zstd: + return zstd.NewWriter(dest) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
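Reviewer note: a small usage sketch for the sniffing helpers above (not part of the vendored code; the payload is built inline and error handling is trimmed):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Build a gzip payload in memory so the sniffer has magic bytes to find.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello layer")) // error handling trimmed for brevity
	zw.Close()

	// DetectCompression reads only the magic number; it prints 1, the Gzip constant.
	fmt.Println(compression.DetectCompression(buf.Bytes()))

	// DecompressStream sniffs and wraps the stream with the right decoder
	// (possibly shelling out to unpigz for gzip, per gzipDecompress below).
	rc, err := compression.DecompressStream(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, _ := io.ReadAll(rc)
	fmt.Printf("%s\n", out) // hello layer
}
```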
+func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + case Zstd: + return "zst" + } + return "" +} + +func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + initPigz.Do(func() { + if unpigzPath = detectPigz(); unpigzPath != "" { + log.L.Debug("using pigz for decompression") + } + }) + + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + cmd.Stdin = in + cmd.Stdout = writer + + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + if err := cmd.Start(); err != nil { + return nil, err + } + + go func() { + if err := cmd.Wait(); err != nil { + writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + writer.Close() + } + }() + + return reader, nil +} + +func detectPigz() string { + path, err := exec.LookPath("unpigz") + if err != nil { + log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") + return "" + } + + // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable + value := os.Getenv(disablePigzEnv) + if value == "" { + return path + } + + disable, err := strconv.ParseBool(value) + if err != nil { + log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) + return path + } + + if disable { + return "" + } + + return path +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go new file mode 100644 index 000000000000..3516494ac0bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go @@ -0,0 +1,28 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bytes" +) + +func FuzzDecompressStream(data []byte) int { + _, _ = DecompressStream(bytes.NewReader(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/content/adaptor.go b/vendor/github.com/containerd/containerd/content/adaptor.go new file mode 100644 index 000000000000..88bad2610e8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/adaptor.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. +func AdaptInfo(info Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go new file mode 100644 index 000000000000..2dc7bf8b52d8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -0,0 +1,207 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "time" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +// +// Overall content lifecycle: +// - Ingester is used to initiate a write operation (aka ingestion) +// - IngestManager is used to manage (e.g. list, abort) active ingestions +// - Once an ingestion is complete (see Writer.Commit), Provider is used to +// query a single piece of content by its digest +// - Manager is used to manage (e.g. list, delete) previously committed content +// +// Note that until ingestion is complete, its content is not visible through +// Provider or Manager. Once ingestion is complete, it is no longer exposed +// through IngestManager. +type Store interface { + Manager + Provider + IngestManager + Ingester +} + +// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer +type ReaderAt interface { + io.ReaderAt + io.Closer + Size() int64 +} + +// Provider provides a reader interface for specific content +type Provider interface { + // ReaderAt only requires desc.Digest to be set. + // Other fields in the descriptor may be used internally for resolving + // the location of the actual data. + ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) +} + +// Ingester writes content +type Ingester interface { + // Writer initiates a writing operation (aka ingestion). A single ingestion + // is uniquely identified by its ref, provided using a WithRef option. + // Writer can be called multiple times with the same ref to access the same + // ingestion. + // Once all the data is written, use Writer.Commit to complete the ingestion. 
+ Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) +} + +// IngestManager provides methods for managing ingestions. An ingestion is a +// not-yet-complete writing operation initiated using Ingester and identified +// by a ref string. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match + // the provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + +// Info holds content specific information +type Info struct { + Digest digest.Digest + Size int64 + CreatedAt time.Time + UpdatedAt time.Time + Labels map[string]string +} + +// Status of a content operation (i.e. an ingestion) +type Status struct { + Ref string + Offset int64 + Total int64 + Expected digest.Digest + StartedAt time.Time + UpdatedAt time.Time +} + +// WalkFunc defines the callback for a blob walk. +type WalkFunc func(Info) error + +// InfoReaderProvider provides both info and reader for the specific content. +type InfoReaderProvider interface { + InfoProvider + Provider +} + +// InfoProvider provides info for content inspection. +type InfoProvider interface { + // Info will return metadata about content available in the content store. + // + // If the content is not present, ErrNotFound will be returned. + Info(ctx context.Context, dgst digest.Digest) (Info, error) +} + +// Manager provides methods for inspecting, listing and removing content. +type Manager interface { + InfoProvider + + // Update updates mutable information related to content. + // If one or more fieldpaths are provided, only those + // fields will be updated. + // Mutable fields: + // labels.* + Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) + + // Walk will call fn for each item in the content store which + // match the provided filters. If no filters are given all + // items will be walked. + Walk(ctx context.Context, fn WalkFunc, filters ...string) error + + // Delete removes the content from the store. + Delete(ctx context.Context, dgst digest.Digest) error +} + +// Writer handles writing of content into a content store +type Writer interface { + // Close closes the writer, if the writer has not been + // committed this allows resuming or aborting. + // Calling Close on a closed writer will not error. + io.WriteCloser + + // Digest may return empty digest or panics until committed. + Digest() digest.Digest + + // Commit commits the blob (but no roll-back is guaranteed on an error). + // size and expected can be zero-value when unknown. + // Commit always closes the writer, even on error. + // ErrAlreadyExists aborts the writer. + Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error + + // Status returns the current state of write + Status() (Status, error) + + // Truncate updates the size of the target blob + Truncate(size int64) error +} + +// Opt is used to alter the mutable properties of content +type Opt func(*Info) error + +// WithLabels allows labels to be set on content +func WithLabels(labels map[string]string) Opt { + return func(info *Info) error { + info.Labels = labels + return nil + } +} + +// WriterOpts is internally used by WriterOpt. 
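Reviewer note: the Writer contract above (resumable, ref-keyed writes finalized by a digest-verified Commit) is easiest to see end to end. A hedged sketch using content.OpenWriter and content.WithRef, which are added later in this diff (the options just below and helpers.go); the package name, store, and payload are hypothetical:

```go
package contentutil // hypothetical helper package

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

// ingest drives the Writer contract by hand: open (or resume) an ingestion
// keyed by ref, write the payload, then Commit against the expected digest.
// store is any content.Ingester implementation (hypothetical here).
func ingest(ctx context.Context, store content.Ingester, ref string, p []byte) error {
	w, err := content.OpenWriter(ctx, store, content.WithRef(ref))
	if err != nil {
		return err
	}
	// Close on a committed writer does not error, so a bare defer is safe.
	defer w.Close()

	if _, err := w.Write(p); err != nil {
		return err
	}
	// Commit refuses to finalize if size or digest disagree with the data.
	return w.Commit(ctx, int64(len(p)), digest.FromBytes(p))
}
```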
+type WriterOpts struct { + Ref string + Desc ocispec.Descriptor +} + +// WriterOpt is used for passing options to Ingester.Writer. +type WriterOpt func(*WriterOpts) error + +// WithDescriptor specifies an OCI descriptor. +// Writer may optionally use the descriptor internally for resolving +// the location of the actual data. +// Write does not require any field of desc to be set. +// If the data size is unknown, desc.Size should be set to 0. +// Some implementations may also accept negative values as "unknown". +func WithDescriptor(desc ocispec.Descriptor) WriterOpt { + return func(opts *WriterOpts) error { + opts.Desc = desc + return nil + } +} + +// WithRef specifies a ref string. +func WithRef(ref string) WriterOpt { + return func(opts *WriterOpts) error { + opts.Ref = ref + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go new file mode 100644 index 000000000000..93bcdde106e4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -0,0 +1,340 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/containerd/log" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/randutil" +) + +var ErrReset = errors.New("writer has been reset") + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +type reader interface { + Reader() io.Reader +} + +// NewReader returns a io.Reader from a ReaderAt +func NewReader(ra ReaderAt) io.Reader { + if rd, ok := ra.(reader); ok { + return rd.Reader() + } + return io.NewSectionReader(ra, 0, ra.Size()) +} + +// ReadBlob retrieves the entire contents of the blob from the provider. +// +// Avoid using this for large blobs, such as layers. +func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return desc.Data, nil + } + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + defer ra.Close() + + p := make([]byte, ra.Size()) + + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return p, err +} + +// WriteBlob writes data with the expected digest into the content store. If +// expected already exists, the method returns immediately and the reader will +// not be consumed. +// +// This is useful when the digest and size are known beforehand. +// +// Copy is buffered, so no need to wrap reader in buffered io. 
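Reviewer note: ReadBlob above and WriteBlob just below bracket the common round trip. A sketch under the same assumptions (hypothetical package, store, and payload):

```go
package contentutil // hypothetical helper package

import (
	"bytes"
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// roundTrip stores a blob with WriteBlob and reads it back with ReadBlob.
// store is any content.Store implementation; the payload is invented.
func roundTrip(ctx context.Context, store content.Store) error {
	blob := []byte("example blob")
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}
	// WriteBlob opens a ref-keyed writer, copies, and commits; a blob that
	// already exists is treated as success and the reader is not consumed.
	if err := content.WriteBlob(ctx, store, "example-ref", bytes.NewReader(blob), desc); err != nil {
		return err
	}
	data, err := content.ReadBlob(ctx, store, desc)
	if err != nil {
		return err
	}
	fmt.Printf("read back %d bytes\n", len(data))
	return nil
}
```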
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { + cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed to open writer: %w", err) + } + + return nil // already present + } + defer cw.Close() + + return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) +} + +// OpenWriter opens a new writer for the given reference, retrying if the writer +// is locked until the reference is available or returns an error. +func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { + var ( + cw Writer + err error + retry = 16 + ) + for { + cw, err = cs.Writer(ctx, opts...) + if err != nil { + if !errdefs.IsUnavailable(err) { + return nil, err + } + + // TODO: Check status to determine if the writer is active, + // continue waiting while active, otherwise return lock + // error or abort. Requires asserting for an ingest manager + + select { + case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): + if retry < 2048 { + retry = retry << 1 + } + continue + case <-ctx.Done(): + // Propagate lock error + return nil, err + } + + } + break + } + + return cw, err +} + +// Copy copies data with the expected digest from the reader into the +// provided content store writer. This copy commits the writer. +// +// This is useful when the digest and size are known beforehand. When +// the size or digest is unknown, these values may be empty. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r := or + if ws.Offset > 0 { + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + for i := 0; ; i++ { + if i >= 1 { + log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") + } + copied, err := copyWithBuffer(cw, r) + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + } + return nil + } +} + +// CopyReaderAt copies to a writer from a given reader at for the given +// number of bytes. This copy does not commit the writer. 
+func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { + ws, err := cw.Status() + if err != nil { + return err + } + + copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if copied < n { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + return nil +} + +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, fmt.Errorf("failed to get status: %w", err) + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + return copyWithBuffer(cw, r) +} + +// seekReader attempts to seek the reader to the given offset, either by +// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding +// up to the given offset. +func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { + // attempt to resolve r as a seeker and setup the offset. + seeker, ok := r.(io.Seeker) + if ok { + nn, err := seeker.Seek(offset, io.SeekStart) + if nn != offset { + if err == nil { + err = fmt.Errorf("unexpected seek location without seek error") + } + return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) + } + + if err != nil { + return nil, err + } + + return r, nil + } + + // ok, let's try io.ReaderAt! + readerAt, ok := r.(io.ReaderAt) + if ok && size > offset { + sr := io.NewSectionReader(readerAt, offset, size) + return sr, nil + } + + // well then, let's just discard up to the offset + n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) + if err != nil { + return nil, fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return nil, errors.New("unable to discard to offset") + } + + return r, nil +} + +// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer +// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have +// a full buffer before we do a write operation to dst to reduce overheads associated +// with the write operations of small buffers. +func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { + // If the reader has a WriteTo method, use it to do the copy. + // Avoids an allocation and a copy. + if wt, ok := src.(io.WriterTo); ok { + return wt.WriteTo(dst) + } + // Similarly, if the writer has a ReadFrom method, use it to do the copy. + if rt, ok := dst.(io.ReaderFrom); ok { + return rt.ReadFrom(src) + } + bufRef := bufPool.Get().(*[]byte) + defer bufPool.Put(bufRef) + buf := *bufRef + for { + nr, er := io.ReadAtLeast(src, buf, len(buf)) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + // If an EOF happens after reading fewer than the requested bytes, + // ReadAtLeast returns ErrUnexpectedEOF. 
+ if er != io.EOF && er != io.ErrUnexpectedEOF { + err = er + } + break + } + } + return +} + +// Exists returns whether an attempt to access the content would not error out +// with an ErrNotFound error. It will return an encountered error if it was +// different than ErrNotFound. +func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) { + _, err := provider.Info(ctx, desc.Digest) + if errdefs.IsNotFound(err) { + return false, nil + } + return err == nil, err +} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000000..de22cadd4195 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,72 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with fmt.Errorf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +package errdefs + +import ( + "github.com/containerd/errdefs" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// These errors map closely to grpc errors. 
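Reviewer note: the aliases just below re-export sentinel error classes; the intended pattern is to annotate them with %w and branch with the IsXXX helpers. A brief sketch (the blob key is invented):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

// findBlob fails with the NotFound class; the key is invented.
func findBlob(key string) error {
	return fmt.Errorf("blob %q: %w", key, errdefs.ErrNotFound)
}

func main() {
	err := findBlob("sha256:deadbeef")
	// The class survives %w wrapping, so callers branch on it, not on text.
	fmt.Println(errdefs.IsNotFound(err))                  // true
	fmt.Println(errors.Is(err, errdefs.ErrAlreadyExists)) // false
}
```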
+var ( + ErrUnknown = errdefs.ErrUnknown + ErrInvalidArgument = errdefs.ErrInvalidArgument + ErrNotFound = errdefs.ErrNotFound + ErrAlreadyExists = errdefs.ErrAlreadyExists + ErrPermissionDenied = errdefs.ErrPermissionDenied + ErrResourceExhausted = errdefs.ErrResourceExhausted + ErrFailedPrecondition = errdefs.ErrFailedPrecondition + ErrConflict = errdefs.ErrConflict + ErrNotModified = errdefs.ErrNotModified + ErrAborted = errdefs.ErrAborted + ErrOutOfRange = errdefs.ErrOutOfRange + ErrNotImplemented = errdefs.ErrNotImplemented + ErrInternal = errdefs.ErrInternal + ErrUnavailable = errdefs.ErrUnavailable + ErrDataLoss = errdefs.ErrDataLoss + ErrUnauthenticated = errdefs.ErrUnauthenticated + + IsCanceled = errdefs.IsCanceled + IsUnknown = errdefs.IsUnknown + IsInvalidArgument = errdefs.IsInvalidArgument + IsDeadlineExceeded = errdefs.IsDeadlineExceeded + IsNotFound = errdefs.IsNotFound + IsAlreadyExists = errdefs.IsAlreadyExists + IsPermissionDenied = errdefs.IsPermissionDenied + IsResourceExhausted = errdefs.IsResourceExhausted + IsFailedPrecondition = errdefs.IsFailedPrecondition + IsConflict = errdefs.IsConflict + IsNotModified = errdefs.IsNotModified + IsAborted = errdefs.IsAborted + IsOutOfRange = errdefs.IsOutOfRange + IsNotImplemented = errdefs.IsNotImplemented + IsInternal = errdefs.IsInternal + IsUnavailable = errdefs.IsUnavailable + IsDataLoss = errdefs.IsDataLoss + IsUnauthorized = errdefs.IsUnauthorized +) diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000000..11091b1db0d7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. 
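Reviewer note: a round-trip sketch for the gRPC mapping defined next, with ToGRPC on the server side and FromGRPC on the client side (the image reference is invented):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Server side: project the error class onto a grpc status code.
	wire := errdefs.ToGRPC(fmt.Errorf("image %q: %w", "example.io/app:latest", errdefs.ErrNotFound))

	// Client side: rebase the code back onto the local sentinel classes.
	local := errdefs.FromGRPC(wire)
	fmt.Println(errdefs.IsNotFound(local)) // true: the class round-trips
}
```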
+func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Error(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Error(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Error(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Error(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Error(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Error(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Error(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Error(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 000000000000..5a9c559c1e0c --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 000000000000..dcc569a4b7d2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package filters defines a syntax and parser that can be used for the +// filtration of items across the containerd API. The core is built on the +// concept of protobuf field paths, with quoting. Several operators allow the +// user to flexibly select items based on field presence, equality, inequality +// and regular expressions. Flexible adaptors support working with any type. +// +// The syntax is fairly familiar, if you've used container ecosystem +// projects. At the core, we base it on the concept of protobuf field +// paths, augmenting with the ability to quote portions of the field path +// to match arbitrary labels. These "selectors" come in the following +// syntax: +// +// ``` +// <fieldpath>[<operator><value>] +// ``` +// +// A basic example is as follows: +// +// ``` +// name==foo +// ``` +// +// This would match all objects that have a field `name` with the value +// `foo`. If we only want to test if the field is present, we can omit the +// operator. This is most useful for matching labels in containerd. The +// following will match objects that have the field "labels" and have the +// label "foo" defined: +// +// ``` +// labels.foo +// ``` +// +// We also allow for quoting of parts of the field path to allow matching +// of arbitrary items: +// +// ``` +// labels."very complex label"==something +// ``` +// +// We also define `!=` and `~=` as operators.
The `!=` will match all +// objects that don't match the value for a field and `~=` will compile the +// target value as a regular expression and match the field value against that. +// +// Selectors can be combined using a comma, such that the resulting +// selector will require all selectors are matched for the object to match. +// The following example will match objects that are named `foo` and have +// the label `bar`: +// +// ``` +// name==foo,labels.bar +// ``` +package filters + +import ( + "regexp" + + "github.com/containerd/log" +) + +// Filter matches specific resources based the provided filter +type Filter interface { + Match(adaptor Adaptor) bool +} + +// FilterFunc is a function that handles matching with an adaptor +type FilterFunc func(Adaptor) bool + +// Match matches the FilterFunc returning true if the object matches the filter +func (fn FilterFunc) Match(adaptor Adaptor) bool { + return fn(adaptor) +} + +// Always is a filter that always returns true for any type of object +var Always FilterFunc = func(adaptor Adaptor) bool { + return true +} + +// Any allows multiple filters to be matched against the object +type Any []Filter + +// Match returns true if any of the provided filters are true +func (m Any) Match(adaptor Adaptor) bool { + for _, m := range m { + if m.Match(adaptor) { + return true + } + } + + return false +} + +// All allows multiple filters to be matched against the object +type All []Filter + +// Match only returns true if all filters match the object +func (m All) Match(adaptor Adaptor) bool { + for _, m := range m { + if !m.Match(adaptor) { + return false + } + } + + return true +} + +type operator int + +const ( + operatorPresent = iota + operatorEqual + operatorNotEqual + operatorMatches +) + +func (op operator) String() string { + switch op { + case operatorPresent: + return "?" + case operatorEqual: + return "==" + case operatorNotEqual: + return "!=" + case operatorMatches: + return "~=" + } + + return "unknown" +} + +type selector struct { + fieldpath []string + operator operator + value string + re *regexp.Regexp +} + +func (m selector) Match(adaptor Adaptor) bool { + value, present := adaptor.Field(m.fieldpath) + + switch m.operator { + case operatorPresent: + return present + case operatorEqual: + return present && value == m.value + case operatorNotEqual: + return value != m.value + case operatorMatches: + if m.re == nil { + r, err := regexp.Compile(m.value) + if err != nil { + log.L.Errorf("error compiling regexp %q", m.value) + return false + } + + m.re = r + } + + return m.re.MatchString(value) + default: + return false + } +} diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go new file mode 100644 index 000000000000..790597aaf2c8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -0,0 +1,294 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "fmt" + "io" + + "github.com/containerd/containerd/errdefs" +) + +/* +Parse the strings into a filter that may be used with an adaptor. + +The filter is made up of zero or more selectors. + +The format is a comma separated list of expressions, in the form of +`<fieldpath><operator><value>`, known as selectors. All selectors must match the +target object for the filter to be true. + +We define the operators "==" for equality, "!=" for not equal and "~=" for a +regular expression. If the operator and value are not present, the matcher will +test for the presence of a value, as defined by the target object. + +The formal grammar is as follows: + +selectors := selector ("," selector)* +selector := fieldpath (operator value) +fieldpath := field ('.' field)* +field := quoted | [A-Za-z] [A-Za-z0-9_]+ +operator := "==" | "!=" | "~=" +value := quoted | [^\s,]+ +quoted := <go string syntax> +*/ +func Parse(s string) (Filter, error) { + // special case empty to match all + if s == "" { + return Always, nil + } + + p := parser{input: s} + return p.parse() +} + +// ParseAll parses each filter in ss and returns a filter that will return true +// if any filter matches the expression. +// +// If no filters are provided, the filter will match anything. +func ParseAll(ss ...string) (Filter, error) { + if len(ss) == 0 { + return Always, nil + } + + var fs []Filter + for _, s := range ss { + f, err := Parse(s) + if err != nil { + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) + } + + fs = append(fs, f) + } + + return Any(fs), nil +} + +type parser struct { + input string + scanner scanner +} + +func (p *parser) parse() (Filter, error) { + p.scanner.init(p.input) + + ss, err := p.selectors() + if err != nil { + return nil, fmt.Errorf("filters: %w", err) + } + + return ss, nil +} + +func (p *parser) selectors() (Filter, error) { + s, err := p.selector() + if err != nil { + return nil, err + } + + ss := All{s} + +loop: + for { + tok := p.scanner.peek() + switch tok { + case ',': + pos, tok, _ := p.scanner.scan() + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a separator") + } + + s, err := p.selector() + if err != nil { + return nil, err + } + + ss = append(ss, s) + case tokenEOF: + break loop + default: + return nil, p.mkerrf(p.scanner.ppos, "unexpected input: %v", string(tok)) + } + } + + return ss, nil +} + +func (p *parser) selector() (selector, error) { + fieldpath, err := p.fieldpath() + if err != nil { + return selector{}, err + } + + switch p.scanner.peek() { + case ',', tokenSeparator, tokenEOF: + return selector{ + fieldpath: fieldpath, + operator: operatorPresent, + }, nil + } + + op, err := p.operator() + if err != nil { + return selector{}, err + } + + var allowAltQuotes bool + if op == operatorMatches { + allowAltQuotes = true + } + + value, err := p.value(allowAltQuotes) + if err != nil { + if err == io.EOF { + return selector{}, io.ErrUnexpectedEOF + } + return selector{}, err + } + + return selector{ + fieldpath: fieldpath, + value: value, + operator: op, + }, nil +} + +func (p *parser) fieldpath() ([]string, error) { + f, err := p.field() + if err != nil { + return nil, err + } + + fs := []string{f} +loop: + for { + tok := p.scanner.peek() // lookahead to consume field separator + + switch tok { + case '.': + pos, tok, _ := p.scanner.scan() // consume separator + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a field separator (`.`)") + } + + f, err := p.field() + if err != nil { + return nil, err + } + + fs = append(fs, f) +
default: + // let the layer above handle the other bad cases. + break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerrf(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerrf(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerrf(pos int, format string, args ...interface{}) error { + return p.mkerr(pos, fmt.Sprintf(format, args...)) +} + +func (p *parser) mkerr(pos int, msg string) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: msg, + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 000000000000..5c800ef846cc --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. 
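For orientation, here is a minimal sketch of driving this vendored filter package end to end. It assumes the package's `Adaptor`/`AdapterFunc` plumbing from `adaptor.go`, which is not part of this hunk; the object and labels are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// One filter string ANDs its selectors together (All semantics);
	// filters.ParseAll would OR several such strings (Any semantics).
	f, err := filters.Parse(`name==foo,labels.env`)
	if err != nil {
		panic(err)
	}

	// AdapterFunc comes from the package's adaptor.go (not shown in this
	// diff); it maps fieldpaths onto a concrete object.
	obj := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		switch fieldpath[0] {
		case "name":
			return "foo", true
		case "labels":
			if len(fieldpath) == 2 {
				lbls := map[string]string{"env": "prod"}
				v, ok := lbls[fieldpath[1]]
				return v, ok
			}
		}
		return "", false
	})

	fmt.Println(f.Match(obj)) // true: name equals foo and labels.env is present
}
```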
+ +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. +// It returns four values: +// +// 1. value, the decoded Unicode code point or byte value; +// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. 
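The payoff of admitting `/` and `|` as quote characters shows up with `~=`: a slash-quoted pattern needs no second layer of string escaping. A hedged sketch, reusing `AdapterFunc` as above:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

// nameMatches reports whether name satisfies a slash-quoted `~=` pattern.
// With ordinary double quotes, every backslash in the regexp would need to
// be escaped again for the string literal; `/` (or `|`) quoting avoids that.
func nameMatches(name string) (bool, error) {
	f, err := filters.Parse(`name~=/^redis-[0-9]+$/`)
	if err != nil {
		return false, err
	}
	return f.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 1 && fieldpath[0] == "name" {
			return name, true
		}
		return "", false
	})), nil
}

func main() {
	ok, _ := nameMatches("redis-0")
	fmt.Println(ok) // true
}
```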
+func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. + buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 000000000000..6a485467b8a0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/images/annotations.go b/vendor/github.com/containerd/containerd/images/annotations.go new file mode 100644 index 000000000000..47d92104cddc --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/annotations.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + // AnnotationImageName is an annotation on a Descriptor in an index.json + // containing the `Name` value as used by an `Image` struct + AnnotationImageName = "io.containerd.image.name" +) diff --git a/vendor/github.com/containerd/containerd/images/diffid.go b/vendor/github.com/containerd/containerd/images/diffid.go new file mode 100644 index 000000000000..85577eedee4a --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/diffid.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/labels" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// GetDiffID gets the diff ID of the layer blob descriptor. +func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { + switch desc.MediaType { + case + // If the layer is already uncompressed, we can just return its digest + MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerForeign, + ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated + return desc.Digest, nil + } + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return "", err + } + v, ok := info.Labels[labels.LabelUncompressed] + if ok { + // Fast path: if the image is already unpacked, we can use the label value + return digest.Parse(v) + } + // if the image is not unpacked, we may not have the label + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return "", err + } + defer ra.Close() + r := content.NewReader(ra) + uR, err := compression.DecompressStream(r) + if err != nil { + return "", err + } + defer uR.Close() + digester := digest.Canonical.Digester() + hashW := digester.Hash() + if _, err := io.Copy(hashW, uR); err != nil { + return "", err + } + if err := ra.Close(); err != nil { + return "", err + } + digest := digester.Digest() + // memorize the computed value + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[labels.LabelUncompressed] = digest.String() + if _, err := cs.Update(ctx, info, "labels"); err != nil { + logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) + } + return digest, nil +} diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go new file mode 100644 index 000000000000..a685092e2cb2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -0,0 +1,323 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" +) + +var ( + // ErrSkipDesc is used to skip processing of a descriptor and + // its descendants. 
+	ErrSkipDesc = errors.New("skip descriptor")
+
+	// ErrStopHandler is used to signify that the descriptor
+	// has been handled and should not be handled further.
+	// This applies only to a single descriptor in a handler
+	// chain and does not apply to descendant descriptors.
+	ErrStopHandler = errors.New("stop handler")
+
+	// ErrEmptyWalk is used when the WalkNotEmpty handlers return no
+	// children (e.g.: they were filtered out).
+	ErrEmptyWalk = errors.New("image might be filtered out")
+)
+
+// Handler handles image manifests
+type Handler interface {
+	Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+}
+
+// HandlerFunc function implementing the Handler interface
+type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+
+// Handle image manifests
+func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+	return fn(ctx, desc)
+}
+
+// Handlers returns a handler that will run the handlers in sequence.
+//
+// A handler may return `ErrStopHandler` to stop calling additional handlers
+func Handlers(handlers ...Handler) HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+		var children []ocispec.Descriptor
+		for _, handler := range handlers {
+			ch, err := handler.Handle(ctx, desc)
+			if err != nil {
+				if errors.Is(err, ErrStopHandler) {
+					break
+				}
+				return nil, err
+			}
+
+			children = append(children, ch...)
+		}
+
+		return children, nil
+	}
+}
+
+// Walk the resources of an image and call the handler for each. If the
+// handler decodes the sub-resources for each image, they will be walked as
+// well.
+//
+// This differs from dispatch in that each sibling resource is considered
+// synchronously.
+func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	for _, desc := range descs {
+
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			if errors.Is(err, ErrSkipDesc) {
+				continue // don't traverse the children.
+			}
+			return err
+		}
+
+		if len(children) > 0 {
+			if err := Walk(ctx, handler, children...); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// WalkNotEmpty works the same way Walk does, with the exception that it ensures that
+// some children are still found by Walking the descriptors (for example, not all of
+// them have been filtered out by one of the handlers). If there are no children,
+// then an ErrEmptyWalk error is returned.
+func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	isEmpty := true
+	var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			return children, err
+		}
+
+		if len(children) > 0 {
+			isEmpty = false
+		}
+
+		return children, nil
+	}
+
+	err := Walk(ctx, notEmptyHandler, descs...)
+	if err != nil {
+		return err
+	}
+
+	if isEmpty {
+		return ErrEmptyWalk
+	}
+
+	return nil
+}
+
+// Dispatch runs the provided handler for content specified by the descriptors.
+// If the handler decodes subresources, they will be visited as well.
+//
+// Handlers for siblings are run in parallel on the provided descriptors. A
+// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
+// any children.
+//
+// A concurrency limiter can be passed in to limit the number of concurrent
+// handlers running.
When limiter is nil, there is no limit. +// +// Typically, this function will be used with `FetchHandler`, often composed +// with other handlers. +// +// If any handler returns an error, the dispatch session will be canceled. +func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { + eg, ctx2 := errgroup.WithContext(ctx) + for _, desc := range descs { + desc := desc + + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } + + eg.Go(func() error { + desc := desc + + children, err := handler.Handle(ctx2, desc) + if limiter != nil { + limiter.Release(1) + } + if err != nil { + if errors.Is(err, ErrSkipDesc) { + return nil // don't traverse the children. + } + return err + } + + if len(children) > 0 { + return Dispatch(ctx2, handler, limiter, children...) + } + + return nil + }) + } + + return eg.Wait() +} + +// ChildrenHandler decodes well-known manifest types and returns their children. +// +// This is useful for supporting recursive fetch and other use cases where you +// want to do a full walk of resources. +// +// One can also replace this with another implementation to allow descending of +// arbitrary types. +func ChildrenHandler(provider content.Provider) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return SetChildrenMappedLabels(manager, f, nil) +} + +// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +// The label map allows the caller to control the labels per child descriptor. +// For returned labels, the index of the child will be appended to the end +// except for the first index when the returned label does not end with '.'. +func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { + if labelMap == nil { + labelMap = ChildGCLabels + } + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + var ( + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields = []string{} + keys = map[string]uint{} + ) + for _, ch := range children { + labelKeys := labelMap(ch) + for _, key := range labelKeys { + idx := keys[key] + keys[key] = idx + 1 + if idx > 0 || key[len(key)-1] == '.' { + key = fmt.Sprintf("%s%d", key, idx) + } + + info.Labels[key] = ch.Digest.String() + fields = append(fields, "labels."+key) + } + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatforms is a handler wrapper which limits the descriptors returned +// based on matching the specified platform matcher. 
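Before the implementation below, a sketch of how `Dispatch`, `ChildrenHandler`, and `FilterPlatforms` compose into a bounded-concurrency walk. The `content.Provider` is assumed to be supplied by the caller, and `visit` must be safe for concurrent use since sibling handlers run in parallel:

```go
package walk

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"golang.org/x/sync/semaphore"
)

// visitPlatform walks every descriptor reachable from root that is relevant
// to the host platform, with at most three handlers in flight at once.
func visitPlatform(ctx context.Context, cs content.Provider, root ocispec.Descriptor, visit func(ocispec.Descriptor)) error {
	limiter := semaphore.NewWeighted(3)

	handler := images.Handlers(
		images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			visit(desc) // called concurrently across siblings
			return nil, nil
		}),
		// Decode manifests/indexes from cs, then drop children whose
		// platform does not match the host.
		images.FilterPlatforms(images.ChildrenHandler(cs), platforms.Default()),
	)

	return images.Dispatch(ctx, handler, limiter, root)
}
```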
+func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + + if m == nil { + descs = children + } else { + for _, d := range children { + if d.Platform == nil || m.Match(*d.Platform) { + descs = append(descs, d) + } + } + } + + return descs, nil + } +} + +// LimitManifests is a handler wrapper which filters the manifest descriptors +// returned using the provided platform. +// The results will be ordered according to the comparison operator and +// use the ordering in the manifests for equal matches. +// A limit of 0 or less is considered no limit. +// A not found error is returned if no manifest is matched. +func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + switch desc.MediaType { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + sort.SliceStable(children, func(i, j int) bool { + if children[i].Platform == nil { + return false + } + if children[j].Platform == nil { + return true + } + return m.Less(*children[i].Platform, *children[j].Platform) + }) + + if n > 0 { + if len(children) == 0 { + return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) + } + if len(children) > n { + children = children[:n] + } + } + default: + // only limit manifests from an index + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go new file mode 100644 index 000000000000..8bebae19b364 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -0,0 +1,444 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/containerd/log" + "github.com/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" +) + +// Image provides the model for how containerd views container images. +type Image struct { + // Name of the image. + // + // To be pulled, it must be a reference compatible with resolvers. + // + // This field is required. + Name string + + // Labels provide runtime decoration for the image record. + // + // There is no default behavior for how these labels are propagated. They + // only decorate the static metadata object. + // + // This field is optional. 
+	Labels map[string]string
+
+	// Target describes the root content for this image. Typically, this is
+	// a manifest, index or manifest list.
+	Target ocispec.Descriptor
+
+	CreatedAt, UpdatedAt time.Time
+}
+
+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+	Synchronous bool
+}
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// SynchronousDelete is used to indicate that an image deletion and removal of
+// the image resources should occur synchronously before returning a result.
+func SynchronousDelete() DeleteOpt {
+	return func(ctx context.Context, o *DeleteOptions) error {
+		o.Synchronous = true
+		return nil
+	}
+}
+
+// Store and interact with images
+type Store interface {
+	Get(ctx context.Context, name string) (Image, error)
+	List(ctx context.Context, filters ...string) ([]Image, error)
+	Create(ctx context.Context, image Image) (Image, error)
+
+	// Update will replace the data in the store with the provided image. If
+	// one or more fieldpaths are provided, only those fields will be updated.
+	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
+
+	Delete(ctx context.Context, name string, opts ...DeleteOpt) error
+}
+
+// TODO(stevvooe): Many of these functions make strong platform assumptions,
+// which are untrue in a lot of cases. More refactoring must be done here to
+// make this work in all cases.
+
+// Config resolves the image configuration descriptor.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+	return Config(ctx, provider, image.Target, platform)
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) {
+	desc, err := image.Config(ctx, provider, platform)
+	if err != nil {
+		return nil, err
+	}
+	return RootFS(ctx, provider, desc)
+}
+
+// Size returns the total size of an image's packed resources.
+func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) {
+	var size int64
+	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Size < 0 {
+			return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+		}
+		size += desc.Size
+		return nil, nil
+	}), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
+}
+
+type platformManifest struct {
+	p *ocispec.Platform
+	m *ocispec.Manifest
+}
+
+// Manifest resolves a manifest from the image for the given platform.
+//
+// When a manifest descriptor inside of a manifest index does not have
+// a platform defined, the platform from the image config is considered.
+//
+// If the descriptor points to a non-index manifest, then the manifest is
+// unmarshalled and returned without considering the platform inside of the
+// config.
+//
+// TODO(stevvooe): This violates the current platform agnostic approach to this
+// package by returning a specific manifest type.
We'll need to refactor this +// to return a manifest descriptor or decide that we want to bring the API in +// this direction because this abstraction is not needed. +func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { + var ( + limit = 1 + m []platformManifest + wasIndex bool + ) + + if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if desc.Digest != image.Digest && platform != nil { + if desc.Platform != nil && !platform.Match(*desc.Platform) { + return nil, nil + } + + if desc.Platform == nil { + p, err := content.ReadBlob(ctx, provider, manifest.Config) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(p, &image); err != nil { + return nil, err + } + + if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { + return nil, nil + } + + } + } + + m = append(m, platformManifest{ + p: desc.Platform, + m: &manifest, + }) + + return nil, nil + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + if platform == nil { + return idx.Manifests, nil + } + + var descs []ocispec.Descriptor + for _, d := range idx.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + + wasIndex = true + + if len(descs) > limit { + return descs[:limit], nil + } + return descs, nil + } + return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound) + }), image); err != nil { + return ocispec.Manifest{}, err + } + + if len(m) == 0 { + err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound) + if wasIndex { + err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound) + } + return ocispec.Manifest{}, err + } + return *m[0].m, nil +} + +// Config resolves the image configuration descriptor using a content provided +// to resolve child resources on the image. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. 
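Taken together, `Manifest` and the `Config` helper defined next support a lookup like the following sketch, which also leans on `content.ReadBlob` from the vendored content package; `cs` and `target` are assumed inputs:

```go
package imageutil

import (
	"context"
	"encoding/json"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// imageConfig resolves the config descriptor for the host platform and
// decodes the config blob it points at.
func imageConfig(ctx context.Context, cs content.Provider, target ocispec.Descriptor) (ocispec.Image, error) {
	desc, err := images.Config(ctx, cs, target, platforms.Default())
	if err != nil {
		return ocispec.Image{}, err
	}

	p, err := content.ReadBlob(ctx, cs, desc)
	if err != nil {
		return ocispec.Image{}, err
	}

	var cfg ocispec.Image
	if err := json.Unmarshal(p, &cfg); err != nil {
		return ocispec.Image{}, err
	}
	return cfg, nil
}
```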
+func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+	manifest, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return manifest.Config, err
+}
+
+// Platforms returns one or more platforms supported by the image.
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
+	var platformSpecs []ocispec.Platform
+	return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Platform != nil {
+			if desc.Platform.OS == "unknown" || desc.Platform.Architecture == "unknown" {
+				return nil, ErrSkipDesc
+			}
+			platformSpecs = append(platformSpecs, *desc.Platform)
+			return nil, ErrSkipDesc
+		}
+
+		switch desc.MediaType {
+		case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+			p, err := content.ReadBlob(ctx, provider, desc)
+			if err != nil {
+				return nil, err
+			}
+
+			var image ocispec.Image
+			if err := json.Unmarshal(p, &image); err != nil {
+				return nil, err
+			}
+
+			platformSpecs = append(platformSpecs,
+				platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
+		}
+		return nil, nil
+	}), ChildrenHandler(provider)), image)
+}
+
+// Check returns a nil error if all components of an image are available in the
+// provider for the specified platform.
+//
+// If available is true, the caller can assume that required represents the
+// complete set of content required for the image.
+//
+// missing will have the components that are part of required but not available
+// in the provider.
+//
+// If there is a problem resolving content, an error will be returned.
+func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) {
+	mfst, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		if errdefs.IsNotFound(err) {
+			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
+		}
+
+		return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
+	}
+
+	// TODO(stevvooe): It is possible that referenced components could have
+	// children, but this is rare. For now, we ignore this and only verify
+	// that manifest components are present.
+	required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
+
+	for _, desc := range required {
+		ra, err := provider.ReaderAt(ctx, desc)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				missing = append(missing, desc)
+				continue
+			} else {
+				return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
+			}
+		}
+		ra.Close()
+		present = append(present, desc)
+
+	}
+
+	return true, required, present, missing, nil
+}
+
+// Children returns the immediate children of content described by the descriptor.
+func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	var descs []ocispec.Descriptor
+	switch desc.MediaType {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		p, err := content.ReadBlob(ctx, provider, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := validateMediaType(p, desc.MediaType); err != nil {
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+		}
+
+		// TODO(stevvooe): We just assume oci manifest, for now. There may be
+		// subtle differences from the docker version.
+		var manifest ocispec.Manifest
+		if err := json.Unmarshal(p, &manifest); err != nil {
+			return nil, err
+		}
+
+		descs = append(descs, manifest.Config)
+		descs = append(descs, manifest.Layers...)
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		p, err := content.ReadBlob(ctx, provider, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := validateMediaType(p, desc.MediaType); err != nil {
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+		}
+
+		var index ocispec.Index
+		if err := json.Unmarshal(p, &index); err != nil {
+			return nil, err
+		}
+
+		descs = append(descs, index.Manifests...)
+	default:
+		if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) || IsAttestationType(desc.MediaType) {
+			// childless data types.
+			return nil, nil
+		}
+		log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+	}
+
+	return descs, nil
+}
+
+// unknownDocument represents a manifest, manifest list, or index that has not
+// yet been validated.
+type unknownDocument struct {
+	MediaType string          `json:"mediaType,omitempty"`
+	Config    json.RawMessage `json:"config,omitempty"`
+	Layers    json.RawMessage `json:"layers,omitempty"`
+	Manifests json.RawMessage `json:"manifests,omitempty"`
+	FSLayers  json.RawMessage `json:"fsLayers,omitempty"` // schema 1
+}
+
+// validateMediaType returns an error if the byte slice is invalid JSON or if
+// the media type identifies the blob as one format but it contains elements of
+// another format.
+func validateMediaType(b []byte, mt string) error {
+	var doc unknownDocument
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return err
+	}
+	if len(doc.FSLayers) != 0 {
+		return fmt.Errorf("media-type: schema 1 not supported")
+	}
+	switch mt {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		if len(doc.Manifests) != 0 ||
+			doc.MediaType == MediaTypeDockerSchema2ManifestList ||
+			doc.MediaType == ocispec.MediaTypeImageIndex {
+			return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
+		}
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		if len(doc.Config) != 0 || len(doc.Layers) != 0 ||
+			doc.MediaType == MediaTypeDockerSchema2Manifest ||
+			doc.MediaType == ocispec.MediaTypeImageManifest {
+			return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
+		}
+	}
+	return nil
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
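Note that `Check`, above, reports incompleteness through its `missing` slice rather than through the error return, so callers typically gate on it along these lines (an illustrative sketch, not containerd's own API):

```go
package imageutil

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ensureComplete fails if any blob required for the host platform is absent
// from cs, naming the first missing digest for diagnostics.
func ensureComplete(ctx context.Context, cs content.Provider, target ocispec.Descriptor) error {
	available, required, _, missing, err := images.Check(ctx, cs, target, platforms.Default())
	if err != nil {
		return err
	}
	if len(missing) > 0 {
		return fmt.Errorf("image incomplete: %d of %d blobs missing (first: %v)",
			len(missing), len(required), missing[0].Digest)
	}
	if !available {
		return fmt.Errorf("image %v is not available", target.Digest)
	}
	return nil
}
```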
+func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { + p, err := content.ReadBlob(ctx, provider, configDesc) + if err != nil { + return nil, err + } + + var config ocispec.Image + if err := json.Unmarshal(p, &config); err != nil { + return nil, err + } + return config.RootFS.DiffIDs, nil +} diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go new file mode 100644 index 000000000000..843adcadc7ec --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/importexport.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Importer is the interface for image importer. +type Importer interface { + // Import imports an image from a tar stream. + Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) +} + +// Exporter is the interface for image exporter. +type Exporter interface { + // Export exports an image to a tar stream. + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error +} diff --git a/vendor/github.com/containerd/containerd/images/labels.go b/vendor/github.com/containerd/containerd/images/labels.go new file mode 100644 index 000000000000..06dfed572d43 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/labels.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" +) diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go new file mode 100644 index 000000000000..49d2a5b1c5d4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -0,0 +1,228 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// mediatype definitions for image components handled in containerd. +// +// oci components are generally referenced directly, although we may centralize +// here for clarity. +const ( + MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" + MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" + MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + + // Checkpoint/Restore Media Types + + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" + + // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest + MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + + // Encrypted media types + + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" + MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" + + // In-toto attestation + MediaTypeInToto = "application/vnd.in-toto+json" +) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. 
+ return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + case "zstd": + return "zstd", nil + } + } + return "", nil + default: + return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (mediaType string, suffixes []string) { + if mt == "" { + return "", []string{} + } + mediaType, ext, ok := strings.Cut(mt, "+") + if !ok { + return mediaType, []string{} + } + + // Splitting the extensions following the mediatype "(+)gzip+encrypted". + // We expect this to be a limited list, so add an arbitrary limit (50). + // + // Note that DiffCompression is only using the last element, so perhaps we + // should split on the last "+" only. + suffixes = strings.SplitN(ext, "+", 50) + sort.Strings(suffixes) + return mediaType, suffixes +} + +// IsNonDistributable returns true if the media type is non-distributable. +func IsNonDistributable(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || + strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") +} + +// IsLayerType returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + switch base, _ := parseMediaTypes(mt); base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsDockerType returns true if the media type has "application/vnd.docker." prefix +func IsDockerType(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.docker.") +} + +// IsManifestType returns true if the media type is an OCI-compatible manifest. +// No support for schema1 manifest. +func IsManifestType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return true + default: + return false + } +} + +// IsIndexType returns true if the media type is an OCI-compatible index. +func IsIndexType(mt string) bool { + switch mt { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + return true + default: + return false + } +} + +// IsConfigType returns true if the media type is an OCI-compatible image config. +// No support for containerd checkpoint configs. 
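These predicates pair naturally with `DiffCompression` when classifying a blob before unpacking; a small illustrative sketch:

```go
package imageutil

import (
	"context"

	"github.com/containerd/containerd/images"
)

// layerCompression classifies a media type before unpacking. The second
// return is false for anything that is not a layer diff.
func layerCompression(ctx context.Context, mt string) (string, bool) {
	if !images.IsLayerType(mt) {
		return "", false
	}
	c, err := images.DiffCompression(ctx, mt)
	if err != nil {
		// Unrecognized layer media type (errdefs.ErrNotImplemented).
		return "", false
	}
	// c is "gzip" or "zstd" when the type names a compression, "unknown"
	// for bare legacy Docker diff types, and "" otherwise (uncompressed
	// or wrapped types).
	return c, true
}
```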
+func IsConfigType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + return true + default: + return false + } +} + +// IsKnownConfig returns true if the media type is a known config type, +// including containerd checkpoint configs +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} + +// IsAttestationType returns true if the media type is an attestation type +func IsAttestationType(mt string) bool { + switch mt { + case MediaTypeInToto: + return true + default: + return false + } +} + +// ChildGCLabels returns the label for a given descriptor to reference it +func ChildGCLabels(desc ocispec.Descriptor) []string { + mt := desc.MediaType + if IsKnownConfig(mt) { + return []string{"containerd.io/gc.ref.content.config"} + } + + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return []string{"containerd.io/gc.ref.content.m."} + } + + if IsLayerType(mt) { + return []string{"containerd.io/gc.ref.content.l."} + } + + return []string{"containerd.io/gc.ref.content."} +} + +// ChildGCLabelsFilterLayers returns the labels for a given descriptor to +// reference it, skipping layer media types +func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { + if IsLayerType(desc.MediaType) { + return nil + } + return ChildGCLabels(desc) +} diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go new file mode 100644 index 000000000000..0f9bab5c5d46 --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/labels.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +// LabelUncompressed is added to compressed layer contents. +// The value is digest of the uncompressed content. +const LabelUncompressed = "containerd.io/uncompressed" + +// LabelSharedNamespace is added to a namespace to allow that namespaces +// contents to be shared. +const LabelSharedNamespace = "containerd.io/namespace.shareable" + +// LabelDistributionSource is added to content to indicate its origin. +// e.g., "containerd.io/distribution.source.docker.io=library/redis" +const LabelDistributionSource = "containerd.io/distribution.source" diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go new file mode 100644 index 000000000000..f83b5dde294d --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package labels
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+const (
+	maxSize = 4096
+	// maximum length of key portion of error message if len of key + len of value > maxSize
+	keyMaxLen = 64
+)
+
+// Validate a label's key and value are under 4096 bytes
+func Validate(k, v string) error {
+	total := len(k) + len(v)
+	if total > maxSize {
+		if len(k) > keyMaxLen {
+			k = k[:keyMaxLen]
+		}
+		return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go
new file mode 100644
index 000000000000..f4b657d7dd87
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go
@@ -0,0 +1,48 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package randutil provides utilities for [crypto/rand].
+package randutil
+
+import (
+	"crypto/rand"
+	"math"
+	"math/big"
+)
+
+// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood.
+func Int63n(n int64) int64 {
+	b, err := rand.Int(rand.Reader, big.NewInt(n))
+	if err != nil {
+		panic(err)
+	}
+	return b.Int64()
+}
+
+// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood.
+func Int63() int64 {
+	return Int63n(math.MaxInt64)
+}
+
+// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood.
+func Intn(n int) int {
+	return int(Int63n(int64(n)))
+}
+
+// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood.
+func Int() int {
+	return int(Int63())
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go
new file mode 100644
index 000000000000..14af02769c47
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/handlers.go
@@ -0,0 +1,416 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + "sync" + + "github.com/containerd/log" + "github.com/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/semaphore" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" +) + +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + +// MakeRefKey returns a unique reference for the descriptor. This reference can be +// used to lookup ongoing processes related to the descriptor. This function +// may look to the context to namespace the reference appropriately. +func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + key := desc.Digest.String() + if desc.Annotations != nil { + if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { + key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) + } + } + + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + key + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + return "manifest-" + key + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: + return "index-" + key + case images.IsLayerType(mt): + return "layer-" + key + case images.IsKnownConfig(mt): + return "config-" + key + case images.IsAttestationType(desc.MediaType): + return "attestation-" + key + default: + log.G(ctx).Warnf("reference for unknown type: %s", mt) + return "unknown-" + key + } +} + +// FetchHandler returns a handler that will fetch all content into the ingester +// discovered in a call to Dispatch. Use with ChildrenHandler to do a full +// recursive fetch. 
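+//
+// A minimal wiring sketch (contentStore and fetcher are illustrative names,
+// not defined in this package; contentStore must satisfy content.Ingester):
+//
+//	h := images.Handlers(
+//		remotes.FetchHandler(contentStore, fetcher), // ingest each descriptor as it is visited
+//		images.ChildrenHandler(contentStore),        // recurse into manifests and indexes
+//	)
+//	if err := images.Dispatch(ctx, h, nil, desc); err != nil {
+//		// handle the error
+//	}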
+func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + return nil, fmt.Errorf("%v not supported", desc.MediaType) + default: + err := Fetch(ctx, ingester, fetcher, desc) + if errdefs.IsAlreadyExists(err) { + return nil, nil + } + return nil, err + } + } +} + +// Fetch fetches the given digest into the provided ingester +func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch") + + cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + if err != nil { + return err + } + defer cw.Close() + + ws, err := cw.Status() + if err != nil { + return err + } + + if desc.Size == 0 { + // most likely a poorly configured registry/web front end which responded with no + // Content-Length header; unable (not to mention useless) to commit a 0-length entry + // into the content store. Error out here otherwise the error sent back is confusing + return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument) + } + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + return err + } + + if desc.Size == int64(len(desc.Data)) { + return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest) + } + + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) +} + +// PushHandler returns a handler that will push all content from the provider +// using a writer from the pusher. +func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + err := push(ctx, provider, pusher, desc) + return nil, err + } +} + +func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("push") + + var ( + cw content.Writer + err error + ) + if cs, ok := pusher.(content.Ingester); ok { + cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + } else { + cw, err = pusher.Push(ctx, desc) + } + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + return nil + } + defer cw.Close() + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + rd := io.NewSectionReader(ra, 0, desc.Size) + return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) +} + +// PushContent pushes content specified by the descriptor from the provider. +// +// Base handlers can be provided which will be called before any push specific +// handlers. 
+//
+// If the passed in content.Provider is also a content.InfoProvider (such as
+// content.Manager) then this will also annotate the distribution sources using
+// labels prefixed with "containerd.io/distribution.source".
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
+
+	var m sync.Mutex
+	manifests := []ocispec.Descriptor{}
+	indexStack := []ocispec.Descriptor{}
+
+	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			m.Lock()
+			manifests = append(manifests, desc)
+			m.Unlock()
+			return nil, images.ErrStopHandler
+		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			m.Lock()
+			indexStack = append(indexStack, desc)
+			m.Unlock()
+			return nil, images.ErrStopHandler
+		default:
+			return nil, nil
+		}
+	})
+
+	pushHandler := PushHandler(pusher, store)
+
+	platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)
+
+	var handler images.Handler
+	if m, ok := store.(content.InfoProvider); ok {
+		annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m)
+		handler = images.Handlers(annotateHandler, filterHandler, pushHandler)
+	} else {
+		handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler)
+	}
+
+	if wrapper != nil {
+		handler = wrapper(handler)
+	}
+
+	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
+		return err
+	}
+
+	if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil {
+		return err
+	}
+
+	// Iterate in reverse order as seen, parent always uploaded after child
+	for i := len(indexStack) - 1; i >= 0; i-- {
+		err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i])
+		if err != nil {
+			// TODO(estesp): until we have a more complete method for index push, we need to report
+			// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+			// as a marker for this problem
+			if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
+				return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributable".
+// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
+//
+// This is based on the media type of the content:
+//   - application/vnd.oci.image.layer.nondistributable
+//   - application/vnd.docker.image.rootfs.foreign
+func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if images.IsNonDistributable(desc.MediaType) {
+			log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
+			return nil, images.ErrSkipDesc
+		}
+
+		if images.IsLayerType(desc.MediaType) {
+			return nil, nil
+		}
+
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+		if len(children) == 0 {
+			return nil, nil
+		}
+
+		out := make([]ocispec.Descriptor, 0, len(children))
+		for _, child := range children {
+			if !images.IsNonDistributable(child.MediaType) {
+				out = append(out, child)
+			} else {
+				log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob")
+			}
+		}
+		return out, nil
+	}
+}
+
+// FilterManifestByPlatformHandler allows Handler to handle non-target
+// platform's manifest and configuration data.
+func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// no platform information
+		if desc.Platform == nil || m == nil {
+			return children, nil
+		}
+
+		var descs []ocispec.Descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			if m.Match(*desc.Platform) {
+				descs = children
+			} else {
+				for _, child := range children {
+					if child.MediaType == images.MediaTypeDockerSchema2Config ||
+						child.MediaType == ocispec.MediaTypeImageConfig {
+
+						descs = append(descs, child)
+					}
+				}
+			}
+		default:
+			descs = children
+		}
+		return descs, nil
+	}
+}
+
+// annotateDistributionSourceHandler adds the distribution source label into
+// the annotations of config or blob descriptors.
+func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// Distribution source is only used for config or blob but may be inherited from
+		// a manifest or manifest list
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		default:
+			return children, nil
+		}
+
+		parentSourceAnnotations := desc.Annotations
+		var parentLabels map[string]string
+		if pi, err := provider.Info(ctx, desc.Digest); err != nil {
+			if !errdefs.IsNotFound(err) {
+				return nil, err
+			}
+		} else {
+			parentLabels = pi.Labels
+		}
+
+		for i := range children {
+			child := children[i]
+
+			info, err := provider.Info(ctx, child.Digest)
+			if err != nil {
+				if !errdefs.IsNotFound(err) {
+					return nil, err
+				}
+			}
+			copyDistributionSourceLabels(info.Labels, &child)
+
+			// Annotate with parent labels for cross repo mount or fetch.
+			// Parent sources may apply to all children since most registries
+			// enforce that children exist before the manifests.
+ copyDistributionSourceLabels(parentSourceAnnotations, &child) + copyDistributionSourceLabels(parentLabels, &child) + + children[i] = child + } + return children, nil + } +} + +func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) { + for k, v := range from { + if !strings.HasPrefix(k, labels.LabelDistributionSource+".") { + continue + } + + if to.Annotations == nil { + to.Annotations = make(map[string]string) + } else { + // Only propagate the parent label if the child doesn't already have it. + if _, has := to.Annotations[k]; has { + continue + } + } + to.Annotations[k] = v + } +} diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go new file mode 100644 index 000000000000..f200c84bc7da --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Resolver provides remotes based on a locator. +type Resolver interface { + // Resolve attempts to resolve the reference into a name and descriptor. + // + // The argument `ref` should be a scheme-less URI representing the remote. + // Structurally, it has a host and path. The "host" can be used to directly + // reference a specific host or be matched against a specific handler. + // + // The returned name should be used to identify the referenced entity. + // Depending on the remote namespace, this may be immutable or mutable. + // While the name may differ from ref, it should itself be a valid ref. + // + // If the resolution fails, an error will be returned. + Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) + + // Fetcher returns a new fetcher for the provided reference. + // All content fetched from the returned fetcher will be + // from the namespace referred to by ref. + Fetcher(ctx context.Context, ref string) (Fetcher, error) + + // Pusher returns a new pusher for the provided reference + // The returned Pusher should satisfy content.Ingester and concurrent attempts + // to push the same blob using the Ingester API should result in ErrUnavailable. + Pusher(ctx context.Context, ref string) (Pusher, error) +} + +// Fetcher fetches content. +// A fetcher implementation may implement the FetcherByDigest interface too. +type Fetcher interface { + // Fetch the resource identified by the descriptor. + Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) +} + +// FetcherByDigest fetches content by the digest. +type FetcherByDigest interface { + // FetchByDigest fetches the resource identified by the digest. + // + // FetcherByDigest usually returns an incomplete descriptor. 
+ // Typically, the media type is always set to "application/octet-stream", + // and the annotations are unset. + FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) +} + +// Pusher pushes content +type Pusher interface { + // Push returns a content writer for the given resource identified + // by the descriptor. + Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) +} + +// FetcherFunc allows package users to implement a Fetcher with just a +// function. +type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch content +func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + return fn(ctx, desc) +} + +// PusherFunc allows package users to implement a Pusher with just a +// function. +type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) + +// Push content +func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return fn(ctx, desc) +} diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes new file mode 100644 index 000000000000..a0717e4b3b90 --- /dev/null +++ b/vendor/github.com/containerd/platforms/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml new file mode 100644 index 000000000000..a695775df49d --- /dev/null +++ b/vendor/github.com/containerd/platforms/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE new file mode 100644 index 000000000000..584149b6ee28 --- /dev/null +++ b/vendor/github.com/containerd/platforms/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright The containerd Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md
new file mode 100644
index 000000000000..2059de771c56
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/README.md
@@ -0,0 +1,32 @@
+# platforms
+
+A Go package for formatting, normalizing and matching container platforms.
+
+This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52).
+
+## Platform Specifier
+
+While the OCI platform specifications provide a tool for components to
+specify structured information, user input typically doesn't need the full
+context and much can be inferred. To solve this problem, this package introduces
+"specifiers". A specifier has the format
+`<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+operating system or the architecture or both.
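+
+For instance, parsing a full specifier is a single call (a sketch using this
+package's `Parse`; error handling elided):
+
+```go
+p, _ := platforms.Parse("linux/arm/v7")
+// p.OS == "linux", p.Architecture == "arm", p.Variant == "v7"
+```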
+ +An example of a common specifier is `linux/amd64`. If the host has a default +runtime that matches this, the user can simply provide the component that +matters. For example, if an image provides `amd64` and `arm64` support, the +operating system, `linux` can be inferred, so they only have to provide +`arm64` or `amd64`. Similar behavior is implemented for operating systems, +where the architecture may be known but a runtime may support images from +different operating systems. + +## Project details + +**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go new file mode 100644 index 000000000000..3913ef663731 --- /dev/null +++ b/vendor/github.com/containerd/platforms/compare.go @@ -0,0 +1,203 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). 
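+//
+// For example (a worked illustration of the rules below, not a contract):
+//
+//	platformVector(linux/arm/v7) => [linux/arm/v7 linux/arm/v6 linux/arm/v5]
+//	platformVector(linux/amd64)  => [linux/amd64 linux/386]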
+func platformVector(platform specs.Platform) []specs.Platform {
+	vector := []specs.Platform{platform}
+
+	switch platform.Architecture {
+	case "amd64":
+		if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 {
+			for amd64Version--; amd64Version >= 1; amd64Version-- {
+				vector = append(vector, specs.Platform{
+					Architecture: platform.Architecture,
+					OS:           platform.OS,
+					OSVersion:    platform.OSVersion,
+					OSFeatures:   platform.OSFeatures,
+					Variant:      "v" + strconv.Itoa(amd64Version),
+				})
+			}
+		}
+		vector = append(vector, specs.Platform{
+			Architecture: "386",
+			OS:           platform.OS,
+			OSVersion:    platform.OSVersion,
+			OSFeatures:   platform.OSFeatures,
+		})
+	case "arm":
+		if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
+			for armVersion--; armVersion >= 5; armVersion-- {
+				vector = append(vector, specs.Platform{
+					Architecture: platform.Architecture,
+					OS:           platform.OS,
+					OSVersion:    platform.OSVersion,
+					OSFeatures:   platform.OSFeatures,
+					Variant:      "v" + strconv.Itoa(armVersion),
+				})
+			}
+		}
+	case "arm64":
+		variant := platform.Variant
+		if variant == "" {
+			variant = "v8"
+		}
+		vector = append(vector, platformVector(specs.Platform{
+			Architecture: "arm",
+			OS:           platform.OS,
+			OSVersion:    platform.OSVersion,
+			OSFeatures:   platform.OSFeatures,
+			Variant:      variant,
+		})...)
+	}
+
+	return vector
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+	return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+	return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return orderedPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return anyPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches and the other does not, sort the match first
+	return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go
new file mode 100644
index 000000000000..91f50e8c88af
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo.go
@@ -0,0 +1,43 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"sync"
+
+	"github.com/containerd/log"
+)
+
+// The ARM instruction set architecture variant, e.g. v7 or v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+	cpuVariantOnce.Do(func() {
+		if isArmArch(runtime.GOARCH) {
+			var err error
+			cpuVariantValue, err = getCPUVariant()
+			if err != nil {
+				log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err)
+			}
+		}
+	})
+	return cpuVariantValue
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
new file mode 100644
index 000000000000..98c7001f9393
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// getMachineArch retrieves the machine architecture through a system call
+func getMachineArch() (string, error) {
+	var uname unix.Utsname
+	err := unix.Uname(&uname)
+	if err != nil {
+		return "", err
+	}
+
+	arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)])
+
+	return arch, nil
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line. For an SMP SoC, parsing the
+	// first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound)
+}
+
+// getCPUVariantFromArch gets the CPU variant from the machine architecture string
+func getCPUVariantFromArch(arch string) (string, error) {
+
+	var variant string
+
+	arch = strings.ToLower(arch)
+
+	if arch == "aarch64" {
+		variant = "8"
+	} else if len(arch) >= 5 && arch[0:4] == "armv" {
+		// Valid arch format is in the form of armvXx
+		switch arch[3:5] {
+		case "v8":
+			variant = "8"
+		case "v7":
+			variant = "7"
+		case "v6":
+			variant = "6"
+		case "v5":
+			variant = "5"
+		case "v4":
+			variant = "4"
+		case "v3":
+			variant = "3"
+		default:
+			variant = "unknown"
+		}
+	} else {
+		return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument)
+	}
+	return variant, nil
+}
+
+// getCPUVariant returns the CPU variant for ARM.
+// We first try reading the "Cpu architecture" field from /proc/cpuinfo;
+// if we can't find it, we fall back to a system call.
+// This covers running ARM in an emulated environment on an x86 host, where
+// this field in /proc/cpuinfo is not present.
+func getCPUVariant() (string, error) { + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + if errors.Is(err, errNotFound) { + // Let's try getting CPU variant from machine architecture + arch, err := getMachineArch() + if err != nil { + return "", fmt.Errorf("failure getting machine architecture: %v", err) + } + + variant, err = getCPUVariantFromArch(arch) + if err != nil { + return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err) + } + } else { + return "", fmt.Errorf("failure getting CPU variant: %v", err) + } + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go new file mode 100644 index 000000000000..97a1fe8a3e55 --- /dev/null +++ b/vendor/github.com/containerd/platforms/cpuinfo_other.go @@ -0,0 +1,55 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" +) + +func getCPUVariant() (string, error) { + + var variant string + + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + } else if runtime.GOOS == "freebsd" { + // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) + // detecting those variants is currently unimplemented + switch runtime.GOARCH { + case "arm64": + variant = "v8" + default: + variant = "unknown" + } + } else { + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented) + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go new file mode 100644 index 000000000000..2e26fd3b4fae --- /dev/null +++ b/vendor/github.com/containerd/platforms/database.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"strings"
+)
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+	switch os {
+	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+		return true
+	}
+	return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+	switch arch {
+	case "arm", "arm64":
+		return true
+	}
+	return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+	switch arch {
+	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
		return true
+	}
+	return false
+}
+
+func normalizeOS(os string) string {
+	if os == "" {
+		return runtime.GOOS
+	}
+	os = strings.ToLower(os)
+
+	switch os {
+	case "macos":
+		os = "darwin"
+	}
+	return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+	arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+	switch arch {
+	case "i386":
+		arch = "386"
+		variant = ""
+	case "x86_64", "x86-64", "amd64":
+		arch = "amd64"
+		if variant == "v1" {
+			variant = ""
+		}
+	case "aarch64", "arm64":
+		arch = "arm64"
+		switch variant {
+		case "8", "v8":
+			variant = ""
+		}
+	case "armhf":
+		arch = "arm"
+		variant = "v7"
+	case "armel":
+		arch = "arm"
+		variant = "v6"
+	case "arm":
+		switch variant {
+		case "", "7":
+			variant = "v7"
+		case "5", "6", "8":
+			variant = "v" + variant
+		}
+	}
+
+	return arch, variant
+}
diff --git a/vendor/github.com/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go
new file mode 100644
index 000000000000..9d898d60e621
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults.go
@@ -0,0 +1,29 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// DefaultString returns the default string specifier for the platform, +// with [PR#6](https://github.com/containerd/platforms/pull/6) the result +// may now also include the OSVersion from the provided platform specification. +func DefaultString() string { + return FormatAll(DefaultSpec()) +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go new file mode 100644 index 000000000000..72355ca85fd9 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_darwin.go @@ -0,0 +1,44 @@ +//go:build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go new file mode 100644 index 000000000000..d3fe89e07668 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_freebsd.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + }) +} diff --git a/vendor/github.com/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000000..44acc47eb324 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_unix.go @@ -0,0 +1,40 @@ +//go:build !windows && !darwin && !freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go new file mode 100644 index 000000000000..427ed72eb617 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_windows.go @@ -0,0 +1,118 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +// DefaultSpec returns the current platform's default platform specification. 
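+//
+// On a Windows Server 2019 amd64 host, for example, the result is roughly
+// the following (the build number is host-dependent):
+//
+//	specs.Platform{OS: "windows", Architecture: "amd64", OSVersion: "10.0.17763"}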
+func DefaultSpec() specs.Platform {
+	major, minor, build := windows.RtlGetNtVersionNumbers()
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		OSVersion:    fmt.Sprintf("%d.%d.%d", major, minor, build),
+		// The Variant field will be empty if arch != ARM.
+		Variant: cpuVariant(),
+	}
+}
+
+type windowsmatcher struct {
+	specs.Platform
+	osVersionPrefix string
+	defaultMatcher  Matcher
+}
+
+// Match matches platform with the same windows major, minor
+// and build version.
+func (m windowsmatcher) Match(p specs.Platform) bool {
+	match := m.defaultMatcher.Match(p)
+
+	if match && m.OS == "windows" {
+		// HPC containers do not have OS version filled
+		if m.OSVersion == "" || p.OSVersion == "" {
+			return true
+		}
+
+		hostOsVersion := getOSVersion(m.osVersionPrefix)
+		ctrOsVersion := getOSVersion(p.OSVersion)
+		return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion)
+	}
+
+	return match
+}
+
+func getOSVersion(osVersionPrefix string) osVersion {
+	parts := strings.Split(osVersionPrefix, ".")
+	if len(parts) < 3 {
+		return osVersion{}
+	}
+
+	majorVersion, _ := strconv.Atoi(parts[0])
+	minorVersion, _ := strconv.Atoi(parts[1])
+	buildNumber, _ := strconv.Atoi(parts[2])
+
+	return osVersion{
+		MajorVersion: uint8(majorVersion),
+		MinorVersion: uint8(minorVersion),
+		Build:        uint16(buildNumber),
+	}
+}
+
+// Less sorts matched platforms in front of other platforms.
+// For matched platforms, it puts platforms with larger revision
+// number in front.
+func (m windowsmatcher) Less(p1, p2 specs.Platform) bool {
+	m1, m2 := m.Match(p1), m.Match(p2)
+	if m1 && m2 {
+		r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
+		return r1 > r2
+	}
+	return m1 && !m2
+}
+
+func revision(v string) int {
+	parts := strings.Split(v, ".")
+	if len(parts) < 4 {
+		return 0
+	}
+	r, err := strconv.Atoi(parts[3])
+	if err != nil {
+		return 0
+	}
+	return r
+}
+
+func prefix(v string) string {
+	parts := strings.Split(v, ".")
+	if len(parts) < 4 {
+		return v
+	}
+	return strings.Join(parts[0:3], ".")
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go
new file mode 100644
index 000000000000..5ad721e779ef
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/errors.go
@@ -0,0 +1,30 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import "errors"
+
+// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs],
+// however, they are not exported as they are not expected to be used as sentinel
+// errors by consumers of this package.
+//
+//nolint:unused // not all errors are used on all platforms.
+var ( + errNotFound = errors.New("not found") + errInvalidArgument = errors.New("invalid argument") + errNotImplemented = errors.New("not implemented") +) diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_compat_windows.go new file mode 100644 index 000000000000..89e66f0c0903 --- /dev/null +++ b/vendor/github.com/containerd/platforms/platform_compat_windows.go @@ -0,0 +1,78 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// osVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type osVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information +const ( + // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + rs5 = 17763 + + // v21H2Server corresponds to Windows Server 2022 (ltsc2022). + v21H2Server = 20348 + + // v22H2Win11 corresponds to Windows 11 (2022 Update). + v22H2Win11 = 22621 +) + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + v21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. 
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func checkHostAndContainerCompat(host, ctr osVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < v21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go new file mode 100644 index 000000000000..1bbbdb91dbc2 --- /dev/null +++ b/vendor/github.com/containerd/platforms/platforms.go @@ -0,0 +1,308 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a platform and wrap +// it in a matcher: +// +// p, err := Parse("linux") +// if err != nil { ... } +// m := NewMatcher(p) +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an image's +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(DefaultSpec()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter +// when fetching and selecting images. +// +// More details of the specifier syntax and platform spec follow. +// +// # Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. Typically, these will be +// images and runtimes, which should declare specifically which platforms they +// support. This looks roughly as follows: +// +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } +// +// Most images and runtimes should at least set Architecture and OS, according +// to their GOARCH and GOOS values, respectively (follow the OCI image +// specification when in doubt). ARM should set the variant under certain +// conditions, which are outlined below.
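Pulling the doc comment's walk-through together as a runnable sketch (illustrative only; the printed result assumes a linux host):

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// Parse a user-supplied specifier, then wrap it in a matcher.
	p, err := platforms.Parse("linux")
	if err != nil {
		panic(err)
	}

	m := platforms.NewMatcher(p)
	fmt.Println(m.Match(platforms.DefaultSpec())) // true on a linux host
}
```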
+// +// # Platform Specifiers +// +// While the OCI platform specifications provide a tool for components to +// specify structured information, user input typically doesn't need the full +// context and much can be inferred. To solve this problem, we introduced +// "specifiers". A specifier has the format +// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the +// operating system or the architecture or both. +// +// An example of a common specifier is `linux/amd64`. If the host has a default +// runtime that matches this, the user can simply provide the component that +// matters. For example, if an image provides amd64 and arm64 support, the +// operating system, `linux`, can be inferred, so they only have to provide +// `arm64` or `amd64`. Similar behavior is implemented for operating systems, +// where the architecture may be known but a runtime may support images from +// different operating systems. +// +// # Normalization +// +// Because not all users are familiar with the way the Go runtime represents +// platforms, several normalizations have been provided to make this package +// easier to use. +// +// The following are performed for architectures: +// +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 +// +// We also normalize the operating system `macos` to `darwin`. +// +// # ARM Support +// +// To qualify ARM architecture, the Variant field is used to qualify the arm +// version. The most common arm version, v7, is represented without the variant +// unless it is explicitly provided. This is treated as equivalent to armhf. A +// previous architecture, armel, will be normalized to arm/v6. +// +// Similarly, the most common arm64 version v8, and most common amd64 version v1 +// are represented without the variant. +// +// While these normalizations are provided, their support on arm platforms has +// not yet been fully implemented and tested. +package platforms + +import ( + "fmt" + "path" + "regexp" + "runtime" + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) + osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`) +) + +const osAndVersionFormat = "%s(%s)" + +// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere. +type Platform = specs.Platform + +// Matcher matches platform specifications, provided by an image or runtime. +type Matcher interface { + Match(platform specs.Platform) bool +} + +// NewMatcher returns a simple matcher based on the provided platform +// specification. The returned matcher only looks for equality based on os, +// architecture and variant. +// +// One may implement their own matcher if this doesn't provide the required +// functionality. +// +// Applications should opt to use `Match` over directly parsing specifiers. +func NewMatcher(platform specs.Platform) Matcher { + return newDefaultMatcher(platform) +} + +type matcher struct { + specs.Platform +} + +func (m *matcher) Match(platform specs.Platform) bool { + normalized := Normalize(platform) + return m.OS == normalized.OS && + m.Architecture == normalized.Architecture && + m.Variant == normalized.Variant +} + +func (m *matcher) String() string { + return FormatAll(m.Platform) +} + +// ParseAll parses a list of platform specifiers into a list of platforms.
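A short sketch of the normalizations above as seen through `Parse` and `Normalize` (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Single-component specifiers are normalized, then completed from the host.
	p, _ := platforms.Parse("aarch64")
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm64" on a linux host

	// Normalize applies the same table to an existing Platform value.
	n := platforms.Normalize(specs.Platform{OS: "macos", Architecture: "x86_64"})
	fmt.Println(n.OS, n.Architecture) // darwin amd64
}
```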
+func ParseAll(specifiers []string) ([]specs.Platform, error) { + platforms := make([]specs.Platform, len(specifiers)) + for i, s := range specifiers { + p, err := Parse(s) + if err != nil { + return nil, fmt.Errorf("invalid platform %s: %w", s, err) + } + platforms[i] = p + } + return platforms, nil +} + +// Parse parses the platform specifier syntax into a platform declaration. +// +// Platform specifiers are in the format +// `<os>[(<OSVersion>)]|<arch>|<os>[(<OSVersion>)]/<arch>[/<variant>]`. +// The minimum required information for a platform specifier is the operating +// system or architecture. The OSVersion can be part of the OS like `windows(10.0.17763)`. +// When an OSVersion is specified, then specs.Platform.OSVersion is populated with that value, +// and an empty string otherwise. +// If there is only a single string (no slashes), the +// value will be matched against the known set of operating systems, then fall +// back to the known set of architectures. The missing component will be +// inferred based on the local environment. +func Parse(specifier string) (specs.Platform, error) { + if strings.Contains(specifier, "*") { + // TODO(stevvooe): need to work out exact wildcard handling + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument) + } + + // Limit to 4 elements to prevent unbounded split + parts := strings.SplitN(specifier, "/", 4) + + var p specs.Platform + for i, part := range parts { + if i == 0 { + // First element is <os>[(<OSVersion>)] + osVer := osAndVersionRe.FindStringSubmatch(part) + if osVer == nil { + return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument) + } + + p.OS = normalizeOS(osVer[1]) + p.OSVersion = osVer[2] + } else { + if !specifierRe.MatchString(part) { + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument) + } + } + } + + switch len(parts) { + case 1: + // in this case, we will test that the value might be an OS (with or + // without the optional OSVersion specified) and look it up. + // If it is not known, we'll treat it as an architecture. Since + // we have very little information about the platform here, we are + // going to be a little more strict if we don't know about the argument + // value. + if isKnownOS(p.OS) { + // picks a default architecture + p.Architecture = runtime.GOARCH + if p.Architecture == "arm" && cpuVariant() != "v7" { + p.Variant = cpuVariant() + } + + return p, nil + } + + p.Architecture, p.Variant = normalizeArch(parts[0], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + if isKnownArch(p.Architecture) { + p.OS = runtime.GOOS + return p, nil + } + + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument) + case 2: + // In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care + // about whether or not we know of the platform.
+ p.Architecture, p.Variant = normalizeArch(parts[1], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + + return p, nil + case 3: + // we have a fully specified variant, this is rare + p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) + if p.Architecture == "arm64" && p.Variant == "" { + p.Variant = "v8" + } + + return p, nil + } + + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument) +} + +// MustParse is like Parse but panics if the specifier cannot be parsed. +// Simplifies initialization of global variables. +func MustParse(specifier string) specs.Platform { + p, err := Parse(specifier) + if err != nil { + panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) + } + return p +} + +// Format returns a string specifier from the provided platform specification. +func Format(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// FormatAll returns a string specifier that also includes the OSVersion from the +// provided platform specification. +func FormatAll(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + if platform.OSVersion != "" { + OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion) + return path.Join(OSAndVersion, platform.Architecture, platform.Variant) + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// Normalize validates and translates the platform to the canonical value. +// +// For example, if "Aarch64" is encountered, we change it to "arm64" or if +// "x86_64" is encountered, it becomes "amd64". +func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + + return platform +} diff --git a/vendor/github.com/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go new file mode 100644 index 000000000000..03f4dcd99814 --- /dev/null +++ b/vendor/github.com/containerd/platforms/platforms_other.go @@ -0,0 +1,30 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NewMatcher returns the default Matcher for containerd +func newDefaultMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} diff --git a/vendor/github.com/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go new file mode 100644 index 000000000000..950e2a2ddbb5 --- /dev/null +++ b/vendor/github.com/containerd/platforms/platforms_windows.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NewMatcher returns a Windows matcher that will match on osVersionPrefix if +// the platform is Windows otherwise use the default matcher +func newDefaultMatcher(platform specs.Platform) Matcher { + prefix := prefix(platform.OSVersion) + return windowsmatcher{ + Platform: platform, + osVersionPrefix: prefix, + defaultMatcher: &matcher{ + Platform: Normalize(platform), + }, + } +} diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go index ce8c0d13c41b..6e0042487429 100644 --- a/vendor/github.com/containerd/ttrpc/metadata.go +++ b/vendor/github.com/containerd/ttrpc/metadata.go @@ -62,6 +62,34 @@ func (m MD) Append(key string, values ...string) { } } +// Clone returns a copy of MD or nil if it's nil. +// It's copied from golang's `http.Header.Clone` implementation: +// https://cs.opensource.google/go/go/+/refs/tags/go1.23.4:src/net/http/header.go;l=94 +func (m MD) Clone() MD { + if m == nil { + return nil + } + + // Find total number of values. + nv := 0 + for _, vv := range m { + nv += len(vv) + } + sv := make([]string, nv) // shared backing array for headers' values + m2 := make(MD, len(m)) + for k, vv := range m { + if vv == nil { + // Preserve nil values. 
+ m2[k] = nil + continue + } + n := copy(sv, vv) + m2[k] = sv[:n:n] + sv = sv[n:] + } + return m2 +} + func (m MD) setRequest(r *Request) { for k, values := range m { for _, v := range values { diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index 26419831dac6..bb71de677b0b 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -74,9 +74,18 @@ func (s *Server) RegisterService(name string, desc *ServiceDesc) { } func (s *Server) Serve(ctx context.Context, l net.Listener) error { - s.addListener(l) + s.mu.Lock() + s.addListenerLocked(l) defer s.closeListener(l) + select { + case <-s.done: + s.mu.Unlock() + return ErrServerClosed + default: + } + s.mu.Unlock() + var ( backoff time.Duration handshaker = s.config.handshaker @@ -188,9 +197,7 @@ func (s *Server) Close() error { return err } -func (s *Server) addListener(l net.Listener) { - s.mu.Lock() - defer s.mu.Unlock() +func (s *Server) addListenerLocked(l net.Listener) { s.listeners[l] = struct{}{} } diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore new file mode 100644 index 000000000000..b7ed7f956df8 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.gitignore @@ -0,0 +1,6 @@ +# editor and IDE paraphernalia +.idea +.vscode + +# macOS paraphernalia +.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 000000000000..df76d7d77169 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
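Stepping back to the new `ttrpc` `MD.Clone` above, a sketch of the aliasing problem it avoids (illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/ttrpc"
)

func main() {
	ctx := ttrpc.WithMetadata(context.Background(), ttrpc.MD{"key": {"a"}})

	md, _ := ttrpc.GetMetadata(ctx)
	md = md.Clone() // copy first, so the metadata already on ctx is not mutated
	md.Set("key", "b")

	orig, _ := ttrpc.GetMetadata(ctx)
	fmt.Println(orig["key"], md["key"]) // [a] [b]
}
```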
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000000..86fefd5bf7d6 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,315 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +These global variables control the behavior of `jsonpatch.Apply`. + +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. + +Structure `jsonpatch.ApplyOptions` includes the configuration options above +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. + +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore +`remove` operations whose `path` points to a non-existent location in the JSON document. +`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` +returning an error when hitting a missing `path` on `remove`. + +When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure +that `add` operations produce all the `path` elements that are missing from the target object. + +Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` +whose values are populated from the global configuration variables. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. 
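An aside on the configuration options described above: a sketch of the `ApplyWithOptions` flow using the v5 package (illustrative only):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name": "John"}`)
	patchJSON := []byte(`[{"op": "remove", "path": "/height"}]`)

	patch, _ := jsonpatch.DecodePatch(patchJSON)

	opts := jsonpatch.NewApplyOptions()
	opts.AllowMissingPathOnRemove = true // tolerate the missing /height

	modified, err := patch.ApplyWithOptions(doc, opts)
	fmt.Println(string(modified), err) // {"name":"John"} <nil>
}
```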
+ +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now let's apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When run, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both sets of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar to the result of merging each merge patch into +the document in succession.
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individually against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When run, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for applying JSON patches +You can install the command-line program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and be fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 000000000000..75304b4437c1 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
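A sketch of how the global `AccumulatedCopySizeLimit` surfaces this error (illustrative only; the limit here is deliberately tiny to force the failure):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// An intentionally tiny limit, so the first copy exceeds it.
	jsonpatch.AccumulatedCopySizeLimit = 1

	doc := []byte(`{"a":{"some":"payload"}}`)
	p, _ := jsonpatch.DecodePatch([]byte(`[{"op":"copy","from":"/a","path":"/b"}]`))

	_, err := p.Apply(doc)
	fmt.Println(err) // reports that the accumulated copy size exceeds the limit
}
```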
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000000..ad88d40181c1 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,389 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
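A sketch of the null-deletes-a-key rule implemented by `mergeDocs` above (illustrative only; nulls are only kept literally when merging two patches):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"name":"John","height":3.21}`)
	patch := []byte(`{"height":null}`)

	// The null value removes "height" from the target document.
	out, _ := jsonpatch.MergePatch(doc, patch)
	fmt.Println(string(out)) // {"name":"John"}
}
```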
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, ErrBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, ErrBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at https://tools.ietf.org/html/rfc7396 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document.
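`CreateMergePatch` accepts either two JSON objects or two equal-length arrays of documents (handled by `createObjectMergePatch` and `createArrayMergePatch` below); a sketch of the array form (illustrative only):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`[{"a":1},{"b":2}]`)
	modified := []byte(`[{"a":2},{"b":2}]`)

	// Two equal-length JSON arrays yield one merge patch per element.
	patch, _ := jsonpatch.CreateMergePatch(original, modified)
	fmt.Println(string(patch)) // [{"a":2},{}]
}
```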
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, ErrBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
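A sketch of the diffing rules above: a value whose type changed is replaced wholesale, and a deleted key becomes an explicit null in the patch (illustrative only):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"a":1,"b":"x"}`)
	modified := []byte(`{"a":"1"}`)

	// "a" changed from number to string, so it is replaced completely;
	// the removed "b" is recorded as null.
	patch, _ := jsonpatch.CreateMergePatch(original, modified)
	fmt.Println(string(patch)) // {"a":"1","b":null}
}
```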
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000000..95136681ba76 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,850 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
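A sketch of decoding a patch and inspecting its `Operation` values via `Kind` and `Path` (illustrative only):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	raw := []byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/height"}
	]`)

	patch, _ := jsonpatch.DecodePatch(raw)
	for _, op := range patch {
		path, _ := op.Path()
		fmt.Println(op.Kind(), path) // "replace /name", then "remove /height"
	}
}
```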
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil || next.raw == nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
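A sketch of the non-standard negative-index support handled by the array container below (illustrative only; `SupportNegativeIndices` defaults to true):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"a":[1,2,3]}`)
	patchJSON := []byte(`[{"op":"replace","path":"/a/-1","value":9}]`)

	// -1 addresses the last element, so index 2 is replaced.
	p, _ := jsonpatch.DecodePatch(patchJSON)
	out, _ := p.Apply(doc)
	fmt.Println(string(out)) // {"a":[1,2,9]}
}
```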
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + if idx < 0 { + if !SupportNegativeIndices { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + if idx < -len(*d) { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + idx += len(*d) + } + + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + + if idx < 0 { + if !SupportNegativeIndices { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + if idx < -len(ary) { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx < 0 { + if !SupportNegativeIndices { + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + if idx < -len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + idx += len(*d) + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + + if idx < 0 { + if !SupportNegativeIndices { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + if idx < -len(cur) { + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) + } + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) + } + + err = con.add(key, op.value()) + if err != nil { + return fmt.Errorf("error in add for path: '%s': %w", path, err) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) + } + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) + } + + err = con.remove(key) + if err != nil { + return fmt.Errorf("error in remove for path: '%s': %w", path, err) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + 
return fmt.Errorf("replace operation failed to decode path: %w", err) + } + + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return fmt.Errorf("replace operation value must be object or array: %w", err) + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return fmt.Errorf("replace operation hit impossible case: %w", err) + } + + return nil + } + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) + } + + _, ok := con.get(key) + if ok != nil { + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) + } + + err = con.set(key, op.value()) + if err != nil { + return fmt.Errorf("error in remove for path: '%s': %w", path, err) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return fmt.Errorf("move operation failed to decode from: %w", err) + } + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) + } + + val, err := con.get(key) + if err != nil { + return fmt.Errorf("error in move for path: '%s': %w", key, err) + } + + err = con.remove(key) + if err != nil { + return fmt.Errorf("error in move for path: '%s': %w", key, err) + } + + path, err := op.Path() + if err != nil { + return fmt.Errorf("move operation failed to decode path: %w", err) + } + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) + } + + err = con.add(key, val) + if err != nil { + return fmt.Errorf("error in move for path: '%s': %w", path, err) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return fmt.Errorf("test operation failed to decode path: %w", err) + } + + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) + } + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) + } + + val, err := con.get(key) + if err != nil { + return fmt.Errorf("error in test for path: '%s': %w", path, err) + } + + if val == nil { + if op.value() == nil || op.value().raw == nil { + return nil + } + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) + } else if op.value() == nil { + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) + } + + if val.equal(op.value()) { + return nil + } + + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return fmt.Errorf("copy operation failed to decode from: %w", err) + } + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) + } + + val, err := con.get(key) + if err != nil { + return fmt.Errorf("error in copy for from: 
'%s': %w", from, err) + } + + path, err := op.Path() + if err != nil { + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) + } + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return fmt.Errorf("error while performing deep copy: %w", err) + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return fmt.Errorf("error while adding value during copy: %w", err) + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
+ +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000000..25fdaf639dfc --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000000..d135bfe02382 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,189 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + +## Install + +``` +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### RGB colors + +If your terminal supports 24-bit colors, you can use RGB color codes. + +```go +color.RGB(255, 128, 0).Println("foreground orange") +color.RGB(230, 42, 42).Println("foreground red") + +color.BgRGB(255, 128, 0).Println("background orange") +color.BgRGB(230, 42, 42).Println("background red") +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! 
+red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") + +// Mix with RGB color codes +color.RGB(255, 128, 0).AddBgRGB(0, 0, 0).Println("orange with black background") + +color.BgRGB(255, 128, 0).AddRGB(255, 255, 255).Println("orange background with white foreground") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(myWriter, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(color.FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into non-color strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. The +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set to a non-empty string. + +`Color` has support to disable/enable colors programmatically both globally and +for single color definitions. For example, suppose you have a CLI app and a +`-no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + + +## Credits + +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000000..ee39b408e95f --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,685 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set to a non-empty string. This is a global option and affects all + // colors. For more control over individual color definitions, use the + // DisableColor() method on each Color. + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default, + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows reusing already created objects with the required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" +} + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +const ( + ResetBold Attribute = iota + 22 + ResetItalic + ResetUnderline + ResetBlinking + _ + ResetReversed + ResetConcealed + ResetCrossedOut +) + +var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{ + Bold: ResetBold, + Faint: ResetBold, + Italic: ResetItalic, + Underline: ResetUnderline, + BlinkSlow: ResetBlinking, + BlinkRapid: ResetBlinking, + ReverseVideo: ResetReversed, + Concealed: ResetConcealed, + CrossedOut: ResetCrossedOut, +} + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite + + // used internally for 256 and 24-bit coloring + foreground +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite + + // used internally for 256 and 24-bit coloring + background +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorIsSet() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// RGB returns a new foreground color in 24-bit RGB. +func RGB(r, g, b int) *Color { + return New(foreground, 2, Attribute(r), Attribute(g), Attribute(b)) +} + +// BgRGB returns a new background color in 24-bit RGB. +func BgRGB(r, g, b int) *Color { + return New(background, 2, Attribute(r), Attribute(g), Attribute(b)) +} + +// AddRGB is used to chain foreground RGB SGR parameters. Use as many parameters as needed to combine +// and create custom color objects. Example: .AddRGB(34, 0, 12).AddRGB(255, 128, 0). +func (c *Color) AddRGB(r, g, b int) *Color { + c.params = append(c.params, foreground, 2, Attribute(r), Attribute(g), Attribute(b)) + return c +} + +// AddBgRGB is used to chain background RGB SGR parameters. Use as many parameters as needed to combine +// and create custom color objects. Example: .AddBgRGB(34, 0, 12).AddBgRGB(255, 128, 0). +func (c *Color) AddBgRGB(r, g, b int) *Color { + c.params = append(c.params, background, 2, Attribute(r), Attribute(g), Attribute(b)) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +// SetWriter is used to set the SGR sequence with the given io.Writer. 
This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(w, c.format()) + return c +} + +// UnsetWriter resets all escape attributes and clears the output with the given +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many parameters as needed to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.wrap(sprintln(a...))) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + return fmt.Fprintln(Output, c.wrap(sprintln(a...))) +} + +// Sprint is just like Print, but returns a string instead of printing it. 
+func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(sprintln(a...)) + "\n" +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// strings. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// strings. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// strings. Windows users should use this in conjunction with color.Output. 
+func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(sprintln(a...)) + "\n" + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m". +// An example output might be: "1;36" -> bold cyan. +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the color's attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + // For each element in the sequence, use the specific reset escape, or the + // generic one if not found. + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(Reset)) + ra, ok := mapResetAttributes[v] + if ok { + format[i] = strconv.Itoa(int(ra)) + } + } + + return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";")) +} + +// DisableColor disables the color output. Useful for suppressing color without +// changing any existing code. Can be used for flags like +// "--no-color". To enable colors again, use the EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise, this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if c == nil && c2 == nil { + return true + } + if c == nil || c2 == nil { + return false + } + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) 
} + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) 
} + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) +} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} + +// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline. +func sprintln(a ...interface{}) string { + return strings.TrimSuffix(fmt.Sprintln(a...), "\n") +} diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 000000000000..be01c558e506 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. 
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000000..9491ad5413c6 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,134 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several ways; pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However, there are times when custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! + red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenience + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also use FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, "don't forget this...") + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. 
+However, for the color.SprintXXX functions, users should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will now be in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will now be bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definitions. For example, +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any non-empty value. + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/go-gorp/gorp/v3/.gitignore b/vendor/github.com/go-gorp/gorp/v3/.gitignore new file mode 100644 index 000000000000..a96e5fec5780 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/.gitignore @@ -0,0 +1,11 @@ +_test +*.test +_testmain.go +_obj +*~ +*.6 +6.out +gorptest.bin +tmp +.idea +coverage.out diff --git a/vendor/github.com/go-gorp/gorp/v3/.travis.yml b/vendor/github.com/go-gorp/gorp/v3/.travis.yml new file mode 100644 index 000000000000..958d260acef1 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/.travis.yml @@ -0,0 +1,36 @@ +language: go +go: +- "1.15.x" +- "1.16.x" +- tip + +matrix: + allow_failures: + - go: tip + +services: +- mysql +- postgresql +- sqlite3 + +env: + global: + - secure: RriLxF6+2yMl67hdVv8ImXlu0h62mhcpqjaOgYNU+IEbUQ7hx96CKY6gkpYubW3BgApvF5RH6j3+HKvh2kGp0XhDOYOQCODfBSaSipZ5Aa5RKjsEYLtuVIobvJ80awR9hUeql69+WXs0/s72WThG0qTbOUY4pqHWfteeY235hWM= + +install: + - go get -t -d + - go get -t -d -tags integration + +before_script: +- mysql -e "CREATE DATABASE gorptest;" +- mysql -u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'" +- psql -c "CREATE DATABASE gorptest;" -U postgres +- psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres +- go get github.com/lib/pq +- go get github.com/mattn/go-sqlite3 +- go get github.com/ziutek/mymysql/godrv +- go get github.com/go-sql-driver/mysql +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls + +script: ./test_all.sh diff --git a/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md 
b/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md new file mode 100644 index 000000000000..7bc145fd7c0c --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# Contributions are very welcome! + +## First: Create an Issue + +Even if your fix is simple, we'd like to have an issue to relate to +the PR. Discussion about the architecture and value can go on the +issue, leaving PR comments exclusively for coding style. + +## Second: Make Your PR + +- Fork the `master` branch +- Make your change +- Make a PR against the `master` branch + +You don't need to wait for comments on the issue before making your +PR. If you do wait for comments, you'll have a better chance of +getting your PR accepted the first time around, but it's not +necessary. + +## Third: Be Patient + +- Be especially patient if your change breaks backward + compatibility. + +We all have lives and jobs, and many of us are no longer on projects +that make use of `gorp`. We will get back to you, but it might take a +while. + +## Fourth: Consider Becoming a Maintainer + +We really do need help. We will likely ask you for help after a good +PR, but if we don't, please create an issue requesting maintainership. +Considering how few of us are currently active, we are unlikely to +refuse good help. diff --git a/vendor/github.com/go-gorp/gorp/v3/LICENSE b/vendor/github.com/go-gorp/gorp/v3/LICENSE new file mode 100644 index 000000000000..b661111d0a9b --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/go-gorp/gorp/v3/README.md b/vendor/github.com/go-gorp/gorp/v3/README.md new file mode 100644 index 000000000000..983fe4343b7f --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/README.md @@ -0,0 +1,809 @@ +# Go Relational Persistence + +[![build status](https://github.com/go-gorp/gorp/actions/workflows/go.yml/badge.svg)](https://github.com/go-gorp/gorp/actions) +[![issues](https://img.shields.io/github/issues/go-gorp/gorp.svg)](https://github.com/go-gorp/gorp/issues) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-gorp/gorp/v3.svg)](https://pkg.go.dev/github.com/go-gorp/gorp/v3) + +### Update 2016-11-13: Future versions + +As many of the maintainers have become busy with other projects, +progress toward the ever-elusive v2 has slowed to the point that we're +only occasionally making progress outside of merging pull requests. +In the interest of continuing to release, I'd like to lean toward a +more maintainable path forward. + +For the moment, I am releasing a v2 tag with the current feature set +from master, as some of those features have been actively used and +relied on by more than one project. Our next goal is to continue +cleaning up the code base with non-breaking changes as much as +possible, but if/when a breaking change is needed, we'll just release +new versions. This allows us to continue development at whatever pace +we're capable of, without delaying the release of features or refusing +PRs. + +## Introduction + +I hesitate to call gorp an ORM. Go doesn't really have objects, at +least not in the classic Smalltalk/Java sense. There goes the "O". +gorp doesn't know anything about the relationships between your +structs (at least not yet). So the "R" is questionable too (but I use +it in the name because, well, it seemed more clever). + +The "M" is alive and well. Given some Go structs and a database, gorp +should remove a fair amount of boilerplate busy-work from your code. + +I hope that gorp saves you time, minimizes the drudgery of getting +data in and out of your database, and helps your code focus on +algorithms, not infrastructure. + +* Bind struct fields to table columns via API or tag +* Support for embedded structs +* Support for transactions +* Forward engineer db schema from structs (great for unit tests) +* Pre/post insert/update/delete hooks +* Automatically generate insert/update/delete statements for a struct +* Automatic binding of auto increment PKs back to struct after insert +* Delete by primary key(s) +* Select by primary key(s) +* Optional trace sql logging +* Bind arbitrary SQL queries to a struct +* Bind slice to SELECT query results without type assertions +* Use positional or named bind parameters in custom SELECT queries +* Optional optimistic locking using a version column (for + update/deletes) + +## Installation + +Use `go get` or your favorite vendoring tool, using whichever import +path you'd like. + +## Versioning + +We use semantic version tags. Feel free to import through `gopkg.in` +(e.g. `gopkg.in/gorp.v2`) to get the latest tag for a major version, +or check out the tag using your favorite vendoring tool. + +Development is not very active right now, but we have plans to +restructure `gorp` as we continue to move toward a more extensible +system. Whenever a breaking change is needed, the major version will +be bumped. 
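+
+For instance, a sketch of pinning to the v2 series via `gopkg.in` (the path
+redirects to the newest v2.x tag):
+
+```go
+import (
+	gorp "gopkg.in/gorp.v2" // any v2.x tag satisfies this import path
+)
+```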
+ +The `master` branch is where all development is done, and breaking +changes may happen from time to time. That said, if you want to live +on the bleeding edge and are comfortable updating your code when we +make a breaking change, you may use `github.com/go-gorp/gorp` as your +import path. + +Check the version tags to see what's available. We'll make a good +faith effort to add badges for new versions, but we make no +guarantees. + +## Supported Go versions + +This package is guaranteed to be compatible with the latest 2 major +versions of Go. + +Any earlier versions are only supported on a best effort basis and can +be dropped any time. Go has a great compatibility promise. Upgrading +your program to a newer version of Go should never really be a +problem. + +## Migration guide + +#### Pre-v2 to v2 +Automatic mapping of the version column used in optimistic locking has +been removed as it could cause problems if the type was not int. The +version column must now explicitly be set with +`tablemap.SetVersionCol()`. + +## Help/Support + +Use our [`gitter` channel](https://gitter.im/go-gorp/gorp). We used +to use IRC, but with most of us being pulled in many directions, we +often need the email notifications from `gitter` to yell at us to sign +in. + +## Quickstart + +```go +package main + +import ( + "database/sql" + "gopkg.in/gorp.v1" + _ "github.com/mattn/go-sqlite3" + "log" + "time" +) + +func main() { + // initialize the DbMap + dbmap := initDb() + defer dbmap.Db.Close() + + // delete any existing rows + err := dbmap.TruncateTables() + checkErr(err, "TruncateTables failed") + + // create two posts + p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") + p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") + + // insert rows - auto increment PKs will be set properly after the insert + err = dbmap.Insert(&p1, &p2) + checkErr(err, "Insert failed") + + // use convenience SelectInt + count, err := dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Rows after inserting:", count) + + // update a row + p2.Title = "Go 1.2 is better than ever" + count, err = dbmap.Update(&p2) + checkErr(err, "Update failed") + log.Println("Rows updated:", count) + + // fetch one row - note use of "post_id" instead of "Id" since column is aliased + // + // Postgres users should use $1 instead of ? 
placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string `db:",size:50"` // Column size set to 50 + Body string `db:"article_body,size:1024"` // Set both column name and size +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. in a production system you'd generally + // use a migration tool, or create the tables via scripts + err = dbmap.CreateTablesIfNotExists() + checkErr(err, "Create tables failed") + + return dbmap +} + +func checkErr(err error, msg string) { + if err != nil { + log.Fatalln(msg, err) + } +} +``` + +## Examples + +### Mapping structs to tables + +First define some types: + +```go +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string +} + +// Example of using tags to alias fields to column names +// The 'db' value is the column name +// +// A hyphen will cause gorp to skip this field, similar to the +// Go json package. 
+// +// This is equivalent to using the ColMap methods: +// +// table := dbmap.AddTableWithName(Product{}, "product") +// table.ColMap("Id").Rename("product_id") +// table.ColMap("Price").Rename("unit_price") +// table.ColMap("IgnoreMe").SetTransient(true) +// +// You can optionally declare the field to be a primary key and/or autoincrement +// +type Product struct { + Id int64 `db:"product_id, primarykey, autoincrement"` + Price int64 `db:"unit_price"` + IgnoreMe string `db:"-"` +} +``` + +Then create a mapper; typically you'd do this once at app startup: + +```go +// connect to db using standard Go database/sql API +// use whatever database/sql driver you wish +db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword") + +// construct a gorp DbMap +dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}} + +// register the structs you wish to use with gorp +// you can also use the shorter dbmap.AddTable() if you +// don't want to override the table name +// +// SetKeys(true) means we have an auto increment primary key, which +// will get automatically bound to your struct post-insert +// +t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") +t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id") +t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id") +``` + +### Struct Embedding + +gorp supports embedding structs. For example: + +```go +type Names struct { + FirstName string + LastName string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} +err := dbmap.Insert(es) +``` + +See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example. + +### Create/Drop Tables ### + +Automatically create / drop registered tables. This is useful for unit tests +but is entirely optional. You can of course use gorp with tables created manually, +or with a separate migration tool (like [sql-migrate](https://github.com/rubenv/sql-migrate), [goose](https://bitbucket.org/liamstask/goose) or [migrate](https://github.com/mattes/migrate)). + +```go +// create all registered tables +dbmap.CreateTables() + +// same as above, but uses "if not exists" clause to skip tables that are +// already defined +dbmap.CreateTablesIfNotExists() + +// drop +dbmap.DropTables() +``` + +### SQL Logging + +Optionally you can pass in a logger to trace all SQL statements. +I recommend enabling this initially while you're getting the feel for what +gorp is doing on your behalf. + +Gorp defines a `GorpLogger` interface that Go's built-in `log.Logger` satisfies. +However, you can write your own `GorpLogger` implementation, or use a package such +as `glog` if you want more control over how statements are logged. 
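+
+For instance, a minimal sketch of a custom implementation (hypothetical type
+name; it assumes the interface is the single Printf method that `log.Logger`
+satisfies, as described above):
+
+```go
+// sqlTracer is a hypothetical GorpLogger that prefixes statements and routes
+// them wherever you like; it assumes a plain `import "log"`.
+type sqlTracer struct{}
+
+func (sqlTracer) Printf(format string, v ...interface{}) {
+	log.Printf("SQL> "+format, v...)
+}
+```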
+ +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +err = dbmap.Insert(p1) +checkErr(err, "Insert failed") + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +err = dbmap.Insert(inv1) +checkErr(err, "Insert failed") + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? 
for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. + +```go +_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{ + "name": "Rob", + "age": 31, +}) +``` + +#### UPDATE / DELETE + +You can execute raw SQL if you wish. Particularly good for batch operations. + +```go +res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10) +``` + +### Transactions + +You can batch operations into a transaction: + +```go +func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error { + // Start a new transaction + trans, err := dbmap.Begin() + if err != nil { + return err + } + + err = trans.Insert(per) + checkErr(err, "Insert failed") + + inv.PersonId = per.Id + err = trans.Insert(inv) + checkErr(err, "Insert failed") + + // if the commit is successful, a nil error is returned + return trans.Commit() +} +``` + +### Hooks + +Use hooks to update data before/after saving to the db. Good for timestamps: + +```go +// implement the PreInsert and PreUpdate hooks +func (i *Invoice) PreInsert(s gorp.SqlExecutor) error { + i.Created = time.Now().UnixNano() + i.Updated = i.Created + return nil +} + +func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error { + i.Updated = time.Now().UnixNano() + return nil +} + +// You can use the SqlExecutor to cascade additional SQL +// Take care to avoid cycles. gorp won't prevent them. +// +// Here's an example of a cascading delete +// +func (p *Person) PreDelete(s gorp.SqlExecutor) error { + query := "delete from invoice_test where PersonId=?" + + _, err := s.Exec(query, p.Id) + + if err != nil { + return err + } + return nil +} +``` + +Full list of hooks that you can implement: + + PostGet + PreInsert + PostInsert + PreUpdate + PostUpdate + PreDelete + PostDelete + + All have the same signature. for example: + + func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error + +### Optimistic Locking + +#### Note that this behaviour has changed in v2. See [Migration Guide](#migration-guide). + +gorp provides a simple optimistic locking feature, similar to Java's +JPA, that will raise an error if you try to update/delete a row whose +`version` column has a value different than the one in memory. This +provides a safe way to do "select then update" style operations +without explicit read and write locks. 
+ +```go +// Version is an auto-incremented number, managed by gorp +// If this property is present on your struct, update +// operations will be constrained +// +// For example, say we defined Person as: + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + + // automatically used as the Version col + // use table.SetVersionCol("columnName") to map a different + // struct field as the version field + Version int64 +} + +p1 := &Person{0, 0, 0, "Bob", "Smith", 0} +err = dbmap.Insert(p1) // Version is now 1 +checkErr(err, "Insert failed") + +obj, err := dbmap.Get(Person{}, p1.Id) +p2 := obj.(*Person) +p2.LName = "Edwards" +_, err = dbmap.Update(p2) // Version is now 2 +checkErr(err, "Update failed") + +p1.LName = "Howard" + +// Raises error because p1.Version == 1, which is out of date +count, err := dbmap.Update(p1) +_, ok := err.(gorp.OptimisticLockError) +if ok { + // should reach this statement + + // in a real app you might reload the row and retry, or + // you might propagate this to the user, depending on the desired + // semantics + fmt.Printf("Tried to update row with stale data: %v\n", err) +} else { + // some other db error occurred - log or return up the stack + fmt.Printf("Unknown db err: %v\n", err) +} +``` +### Adding INDEX(es) on column(s) beyond the primary key ### + +Indexes are frequently critical for performance. Here is how to add +them to your tables. + +NB: SqlServer and Oracle need testing and possible adjustment to the +CreateIndexSuffix() and DropIndexSuffix() methods to make AddIndex() +work for them. + +In the example below we put an index both on the Id field, and on the +AcctId field. + +``` +type Account struct { + Id int64 + AcctId string // e.g. this might be a long uuid for portability +} + +// indexType (the 2nd param to AddIndex call) is "Btree" or "Hash" for MySQL. +// demonstrate adding a second index on AcctId, and constrain that field to have unique values. +dbm.AddTable(iptab.Account{}).SetKeys(true, "Id").AddIndex("AcctIdIndex", "Btree", []string{"AcctId"}).SetUnique(true) + +err = dbm.CreateTablesIfNotExists() +checkErr(err, "CreateTablesIfNotExists failed") + +err = dbm.CreateIndex() +checkErr(err, "CreateIndex failed") + +``` +Check the effect of the CreateIndex() call in MySQL: +``` +$ mysql + +MariaDB [test]> show create table Account; ++---------+--------------------------+ +| Account | CREATE TABLE `Account` ( + `Id` bigint(20) NOT NULL AUTO_INCREMENT, + `AcctId` varchar(255) DEFAULT NULL, + PRIMARY KEY (`Id`), + UNIQUE KEY `AcctIdIndex` (`AcctId`) USING BTREE <<<--- yes! index added. +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ++---------+--------------------------+ + +``` + + +## Database Drivers + +gorp uses the Go 1 `database/sql` package. A full list of compliant +drivers is available here: + +http://code.google.com/p/go-wiki/wiki/SQLDrivers + +Sadly, SQL databases differ on various issues. gorp provides a Dialect +interface that should be implemented per database vendor. Dialects +are provided for: + +* MySQL +* PostgreSQL +* sqlite3 + +Each of these three databases passes the test suite. See `gorp_test.go` +for example DSNs for these three databases. + +Support is also provided for: + +* Oracle (contributed by @klaidliadon) +* SQL Server (contributed by @qrawl) - use driver: + github.com/denisenkom/go-mssqldb + +Note that these databases are not covered by CI and I (@coopernurse) +have no good way to test them locally. 
So please try them and send
+patches as needed, but expect a bit more unpredictability.
+
+## Sqlite3 Extensions
+
+In order to use sqlite3 extensions you need to first register a custom driver:
+
+```go
+import (
+    "database/sql"
+
+    // use whatever database/sql driver you wish
+    sqlite "github.com/mattn/go-sqlite3"
+)
+
+func customDriver() (*sql.DB, error) {
+    // create custom driver with extensions defined
+    sql.Register("sqlite3-custom", &sqlite.SQLiteDriver{
+        Extensions: []string{
+            "mod_spatialite",
+        },
+    })
+
+    // now you can connect using the 'sqlite3-custom' driver instead of 'sqlite3'
+    return sql.Open("sqlite3-custom", "/tmp/post_db.bin")
+}
+```
+
+## Known Issues
+
+### SQL placeholder portability
+
+Different databases use different strings to indicate variable
+placeholders in prepared SQL statements. Unlike some database
+abstraction layers (such as JDBC), Go's `database/sql` does not
+standardize this.
+
+SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get`
+methods delegates to a Dialect implementation for each database, and
+will generate portable SQL.
+
+Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`,
+etc. will not be parsed. Consequently you may have portability issues
+if you write a query like this:
+
+```go
+// works on MySQL and Sqlite3, but not with PostgreSQL
+err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
+```
+
+In `Select` and `SelectOne` you can use named parameters to work
+around this. The following is portable:
+
+```go
+err := dbmap.SelectOne(&val, "select * from foo where id = :id",
+    map[string]interface{}{"id": 30})
+```
+
+Additionally, when using Postgres as your database, use `$1`-style
+placeholders rather than `?`; raw queries with `?` placeholders will
+fail on Postgres with `pq: operator does not exist` errors.
+Alternatively, use `dbMap.Dialect.BindVar(varIdx)` to get the proper
+variable binding for your dialect (a short sketch of this appears
+after the Performance section below).
+
+### time.Time and time zones
+
+gorp will pass `time.Time` fields through to the `database/sql`
+driver, but note that the behavior of this type varies across database
+drivers.
+
+MySQL users should be especially cautious. See:
+https://github.com/ziutek/mymysql/pull/77
+
+To avoid any potential issues with timezone/DST, consider:
+
+- Using an integer field for time data and storing UNIX time.
+- Using a custom time type that implements some SQL types (a sketch
+  appears after the Performance section below):
+  - [`"database/sql".Scanner`](https://golang.org/pkg/database/sql/#Scanner)
+  - [`"database/sql/driver".Valuer`](https://golang.org/pkg/database/sql/driver/#Valuer)
+
+## Running the tests
+
+The included tests may be run against MySQL, PostgreSQL, or sqlite3.
+You must set two environment variables so the test code knows which
+driver to use, and how to connect to your database.
+
+```sh
+# MySQL example:
+export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
+export GORP_TEST_DIALECT=mysql
+
+# run the tests
+go test
+
+# run the tests and benchmarks
+go test -bench="Bench" -benchtime 10s
+```
+
+Valid `GORP_TEST_DIALECT` values are: "mysql" (for mymysql),
+"gomysql" (for go-sql-driver), "postgres", and "sqlite". See the
+`test_all.sh` script for examples of all 3 databases. This is the
+script I run locally to test the library.
+
+## Performance
+
+gorp uses reflection to construct SQL queries and bind parameters.
+See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a
+simple perf test. On my MacBook Pro gorp is about 2-3% slower than
+hand-written SQL.
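+
+As a worked example of the `BindVar` note above, here is a minimal
+sketch of building a raw `in (...)` query against whatever dialect the
+DbMap was configured with. The `placeholders` helper, the `selectByIds`
+function, and the `Foo` struct are illustrative only (and `fmt` and
+`strings` imports are assumed); `Dialect.BindVar` is the only gorp API
+used.
+
+```go
+// placeholders builds a dialect-appropriate bind-variable list,
+// e.g. "?, ?, ?" for MySQL/sqlite3 or "$1, $2, $3" for PostgreSQL.
+// (Illustrative helper; not part of gorp itself.)
+func placeholders(d gorp.Dialect, n int) string {
+    vars := make([]string, n)
+    for i := range vars {
+        vars[i] = d.BindVar(i) // BindVar takes a zero-based index
+    }
+    return strings.Join(vars, ", ")
+}
+
+func selectByIds(dbmap *gorp.DbMap, ids ...interface{}) ([]Foo, error) {
+    var results []Foo
+    query := fmt.Sprintf("select * from foo where id in (%s)",
+        placeholders(dbmap.Dialect, len(ids)))
+    _, err := dbmap.Select(&results, query, ids...)
+    return results, err
+}
+```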
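+
+And for the time.Time caveats above, a sketch of the custom-type
+approach: a hypothetical `UnixTime` type that stores timestamps as
+UNIX seconds in an integer column, so the driver never has to guess
+about time zones or DST. It implements the two interfaces listed in
+the time.Time section (imports of `time`, `fmt`, and
+`database/sql/driver` are assumed).
+
+```go
+// UnixTime persists as UNIX seconds (an integer column).
+// (Illustrative type; not part of gorp itself.)
+type UnixTime struct {
+    time.Time
+}
+
+// Value implements "database/sql/driver".Valuer: Go -> db.
+func (u UnixTime) Value() (driver.Value, error) {
+    return u.Time.Unix(), nil
+}
+
+// Scan implements "database/sql".Scanner: db -> Go.
+func (u *UnixTime) Scan(src interface{}) error {
+    sec, ok := src.(int64)
+    if !ok {
+        return fmt.Errorf("UnixTime: cannot scan %T", src)
+    }
+    u.Time = time.Unix(sec, 0).UTC()
+    return nil
+}
+```
+
+Because the zero `UnixTime`'s `Value()` yields an int64, gorp's table
+creation should infer an integer column for fields of this type.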
+ + +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/vendor/github.com/go-gorp/gorp/v3/column.go b/vendor/github.com/go-gorp/gorp/v3/column.go new file mode 100644 index 000000000000..383e9efb6579 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/column.go @@ -0,0 +1,76 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import "reflect" + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Query used for getting generated id after insert + GeneratedIdQuery string + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + MaxSize int + + DefaultValue string + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. +func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} diff --git a/vendor/github.com/go-gorp/gorp/v3/db.go b/vendor/github.com/go-gorp/gorp/v3/db.go new file mode 100644 index 000000000000..d78062e5b150 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/db.go @@ -0,0 +1,985 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "log" + "reflect" + "strconv" + "strings" + "time" +) + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. 
+// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + ctx context.Context + + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + // ExpandSlices when enabled will convert slice arguments in mappers into flat + // values. It will modify the query, adding more placeholders, and the mapper, + // adding each item of the slice as a new unique entry in the mapper. For + // example, given the scenario bellow: + // + // dbmap.Select(&output, "SELECT 1 FROM example WHERE id IN (:IDs)", map[string]interface{}{ + // "IDs": []int64{1, 2, 3}, + // }) + // + // The executed query would be: + // + // SELECT 1 FROM example WHERE id IN (:IDs0,:IDs1,:IDs2) + // + // With the mapper: + // + // map[string]interface{}{ + // "IDs": []int64{1, 2, 3}, + // "IDs0": int64(1), + // "IDs1": int64(2), + // "IDs2": int64(3), + // } + // + // It is also flexible for custom slice types. The value just need to + // implement stringer or numberer interfaces. + // + // type CustomValue string + // + // const ( + // CustomValueHey CustomValue = "hey" + // CustomValueOh CustomValue = "oh" + // ) + // + // type CustomValues []CustomValue + // + // func (c CustomValues) ToStringSlice() []string { + // values := make([]string, len(c)) + // for i := range c { + // values[i] = string(c[i]) + // } + // return values + // } + // + // func query() { + // // ... + // result, err := dbmap.Select(&output, "SELECT 1 FROM example WHERE value IN (:Values)", map[string]interface{}{ + // "Values": CustomValues([]CustomValue{CustomValueHey}), + // }) + // // ... + // } + ExpandSliceArgs bool + + tables []*TableMap + tablesDynamic map[string]*TableMap // tables that use same go-struct and different db table names + logger GorpLogger + logPrefix string +} + +func (m *DbMap) dynamicTableAdd(tableName string, tbl *TableMap) { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + m.tablesDynamic[tableName] = tbl +} + +func (m *DbMap) dynamicTableFind(tableName string) (*TableMap, bool) { + if m.tablesDynamic == nil { + return nil, false + } + tbl, found := m.tablesDynamic[tableName] + return tbl, found +} + +func (m *DbMap) dynamicTableMap() map[string]*TableMap { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + return m.tablesDynamic +} + +func (m *DbMap) WithContext(ctx context.Context) SqlExecutor { + copy := &DbMap{} + *copy = *m + copy.ctx = ctx + return copy +} + +func (m *DbMap) CreateIndex() error { + var err error + dialect := reflect.TypeOf(m.Dialect) + for _, table := range m.tables { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + for _, table := range m.dynamicTableMap() { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + return err +} + +func (m *DbMap) createIndexImpl(dialect reflect.Type, + table *TableMap, + index *IndexMap) error { + s := bytes.Buffer{} + s.WriteString("create") + if index.Unique { + s.WriteString(" unique") + } + s.WriteString(" index") + s.WriteString(fmt.Sprintf(" %s on %s", index.IndexName, table.TableName)) + if dname := dialect.Name(); dname == "PostgresDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + 
s.WriteString(" (") + for x, col := range index.columns { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(col)) + } + s.WriteString(")") + + if dname := dialect.Name(); dname == "MySQLDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(";") + _, err := m.Exec(s.String()) + return err +} + +func (t *TableMap) DropIndex(name string) error { + + var err error + dialect := reflect.TypeOf(t.dbmap.Dialect) + for _, idx := range t.indexes { + if idx.IndexName == name { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("DROP INDEX %s", idx.IndexName)) + + if dname := dialect.Name(); dname == "MySQLDialect" { + s.WriteString(fmt.Sprintf(" %s %s", t.dbmap.Dialect.DropIndexSuffix(), t.TableName)) + } + s.WriteString(";") + _, e := t.dbmap.Exec(s.String()) + if e != nil { + err = e + } + break + } + } + t.ResetSql() + return err +} + +// AddTable registers the given interface type with gorp. The table name +// will be given the name of the TypeOf(i). You must call this function, +// or AddTableWithName, for any struct type you wish to persist with +// the given DbMap. +// +// This operation is idempotent. If i's type is already mapped, the +// existing *TableMap is returned +func (m *DbMap) AddTable(i interface{}) *TableMap { + return m.AddTableWithName(i, "") +} + +// AddTableWithName has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { + return m.AddTableWithNameAndSchema(i, "", name) +} + +// AddTableWithNameAndSchema has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { + t := reflect.TypeOf(i) + if name == "" { + name = t.Name() + } + + // check if we have a table for this type already + // if so, update the name and return the existing pointer + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + table.TableName = name + return table + } + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + m.tables = append(m.tables, tmap) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + return tmap +} + +// AddTableDynamic registers the given interface type with gorp. +// The table name will be dynamically determined at runtime by +// using the GetTableName method on DynamicTable interface +func (m *DbMap) AddTableDynamic(inp DynamicTable, schema string) *TableMap { + + val := reflect.ValueOf(inp) + elm := val.Elem() + t := elm.Type() + name := inp.TableName() + if name == "" { + panic("Missing table name in DynamicTable instance") + } + + // Check if there is another dynamic table with the same name + if _, found := m.dynamicTableFind(name); found { + panic(fmt.Sprintf("A table with the same name %v already exists", name)) + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) 
+ } + + m.dynamicTableAdd(name, tmap) + + return tmap +} + +func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, primaryKey []*ColumnMap) { + primaryKey = make([]*ColumnMap, 0) + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recursively add nested fields in embedded structs. + subcols, subpk := m.readStructColumns(f.Type) + // Don't append nested fields that have the same field + // name as an already-mapped field. + for _, subcol := range subcols { + shouldAppend := true + for _, col := range cols { + if !subcol.Transient && subcol.fieldName == col.fieldName { + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, subcol) + } + } + if subpk != nil { + primaryKey = append(primaryKey, subpk...) + } + } else { + // Tag = Name { ',' Option } + // Option = OptionKey [ ':' OptionValue ] + cArguments := strings.Split(f.Tag.Get("db"), ",") + columnName := cArguments[0] + var maxSize int + var defaultValue string + var isAuto bool + var isPK bool + var isNotNull bool + for _, argString := range cArguments[1:] { + argString = strings.TrimSpace(argString) + arg := strings.SplitN(argString, ":", 2) + + // check mandatory/unexpected option values + switch arg[0] { + case "size", "default": + // options requiring value + if len(arg) == 1 { + panic(fmt.Sprintf("missing option value for option %v on field %v", arg[0], f.Name)) + } + default: + // options where value is invalid (currently all other options) + if len(arg) == 2 { + panic(fmt.Sprintf("unexpected option value for option %v on field %v", arg[0], f.Name)) + } + } + + switch arg[0] { + case "size": + maxSize, _ = strconv.Atoi(arg[1]) + case "default": + defaultValue = arg[1] + case "primarykey": + isPK = true + case "autoincrement": + isAuto = true + case "notnull": + isNotNull = true + default: + panic(fmt.Sprintf("Unrecognized tag option for field %v: %v", f.Name, arg)) + } + } + if columnName == "" { + columnName = f.Name + } + + gotype := f.Type + valueType := gotype + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + value := reflect.New(valueType).Interface() + if m.TypeConverter != nil { + // Make a new pointer to a value of type gotype and + // pass it to the TypeConverter's FromDb method to see + // if a different type should be used for the column + // type during table creation. + scanner, useHolder := m.TypeConverter.FromDb(value) + if useHolder { + value = scanner.Holder + gotype = reflect.TypeOf(value) + } + } + if typer, ok := value.(SqlTyper); ok { + gotype = reflect.TypeOf(typer.SqlType()) + } else if typer, ok := value.(legacySqlTyper); ok { + log.Printf("Deprecation Warning: update your SqlType methods to return a driver.Value") + gotype = reflect.TypeOf(typer.SqlType()) + } else if valuer, ok := value.(driver.Valuer); ok { + // Only check for driver.Valuer if SqlTyper wasn't + // found. + v, err := valuer.Value() + if err == nil && v != nil { + gotype = reflect.TypeOf(v) + } + } + cm := &ColumnMap{ + ColumnName: columnName, + DefaultValue: defaultValue, + Transient: columnName == "-", + fieldName: f.Name, + gotype: gotype, + isPK: isPK, + isAutoIncr: isAuto, + isNotNull: isNotNull, + MaxSize: maxSize, + } + if isPK { + primaryKey = append(primaryKey, cm) + } + // Check for nested fields of the same field name and + // override them. 
+ shouldAppend := true + for index, col := range cols { + if !col.Transient && col.fieldName == cm.fieldName { + cols[index] = cm + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, cm) + } + } + + } + return +} + +// CreateTables iterates through TableMaps registered to this DbMap and +// executes "create table" statements against the database for each. +// +// This is particularly useful in unit tests where you want to create +// and destroy the schema automatically. +func (m *DbMap) CreateTables() error { + return m.createTables(false) +} + +// CreateTablesIfNotExists is similar to CreateTables, but starts +// each statement with "create table if not exists" so that existing +// tables do not raise errors +func (m *DbMap) CreateTablesIfNotExists() error { + return m.createTables(true) +} + +func (m *DbMap) createTables(ifNotExists bool) error { + var err error + for i := range m.tables { + table := m.tables[i] + sql := table.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + for _, tbl := range m.dynamicTableMap() { + sql := tbl.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + return err +} + +// DropTable drops an individual table. +// Returns an error when the table does not exist. +func (m *DbMap) DropTable(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, false) +} + +// DropTableIfExists drops an individual table when the table exists. +func (m *DbMap) DropTableIfExists(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, true) +} + +// DropTables iterates through TableMaps registered to this DbMap and +// executes "drop table" statements against the database for each. +func (m *DbMap) DropTables() error { + return m.dropTables(false) +} + +// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to +// avoid errors for tables that do not exist. +func (m *DbMap) DropTablesIfExists() error { + return m.dropTables(true) +} + +// Goes through all the registered tables, dropping them one by one. +// If an error is encountered, then it is returned and the rest of +// the tables are not dropped. +func (m *DbMap) dropTables(addIfExists bool) (err error) { + for _, table := range m.tables { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + for _, table := range m.dynamicTableMap() { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + return err +} + +// Implementation of dropping a single table. 
+func (m *DbMap) dropTable(t reflect.Type, name string, addIfExists bool) error { + table := tableOrNil(m, t, name) + if table == nil { + return fmt.Errorf("table %s was not registered", table.TableName) + } + + return m.dropTableImpl(table, addIfExists) +} + +func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { + tableDrop := "drop table" + if ifExists { + tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) + } + _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + return err +} + +// TruncateTables iterates through TableMaps registered to this DbMap and +// executes "truncate table" statements against the database for each, or in the case of +// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization +// (http://www.sqlite.org/lang_delete.html) +func (m *DbMap) TruncateTables() error { + var err error + for i := range m.tables { + table := m.tables[i] + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + for _, table := range m.dynamicTableMap() { + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + return err +} + +// Insert runs a SQL INSERT statement for each element in list. List +// items must be pointers. +// +// Any interface whose TableMap has an auto-increment primary key will +// have its last insert id bound to the PK field on the struct. +// +// The hook functions PreInsert() and/or PostInsert() will be executed +// before/after the INSERT statement if the interface defines them. +// +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Insert(list ...interface{}) error { + return insert(m, m, list...) +} + +// Update runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Update(list ...interface{}) (int64, error) { + return update(m, m, nil, list...) +} + +// UpdateColumns runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// Only the columns accepted by filter are included in the UPDATE. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) { + return update(m, m, filter, list...) +} + +// Delete runs a SQL DELETE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreDelete() and/or PostDelete() will be executed +// before/after the DELETE statement if the interface defines them. +// +// Returns the number of rows deleted. 
+// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Delete(list ...interface{}) (int64, error) { + return delete(m, m, list...) +} + +// Get runs a SQL SELECT to fetch a single row from the table based on the +// primary key(s) +// +// i should be an empty value for the struct to load. keys should be +// the primary key value(s) for the row to load. If multiple keys +// exist on the table, the order should match the column order +// specified in SetKeys() when the table mapping was defined. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Returns a pointer to a struct that matches or nil if no row is found. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(m, m, i, keys...) +} + +// Select runs an arbitrary SQL query, binding the columns in the result +// to fields on the struct specified by i. args represent the bind +// parameters for the SQL statement. +// +// Column names on the SELECT statement should be aliased to the field names +// on the struct i. Returns an error if one or more columns in the result +// do not match. It is OK if fields on i are not part of the SQL +// statement. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Values are returned in one of two ways: +// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to +// matching rows of type i. +// 2. If i is a pointer to a slice, the results will be appended to that slice +// and nil returned. +// +// i does NOT need to be registered with AddTable() +func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return hookedselect(m, m, i, query, args...) +} + +// Exec runs an arbitrary SQL statement. args represent the bind parameters. +// This is equivalent to running: Exec() using database/sql +func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return maybeExpandNamedQueryAndExec(m, query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function +func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectInt(m, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function +func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullInt(m, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function +func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectFloat(m, query, args...) 
+} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullFloat(m, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectStr(m, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function +func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullStr(m, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function +func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectOne(m, m, holder, query, args...) +} + +// Begin starts a gorp Transaction +func (m *DbMap) Begin() (*Transaction, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, "begin;") + } + tx, err := begin(m) + if err != nil { + return nil, err + } + return &Transaction{ + dbmap: m, + tx: tx, + closed: false, + }, nil +} + +// TableFor returns the *TableMap corresponding to the given Go Type +// If no table is mapped to that type an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { + table := tableOrNil(m, t, "") + if table == nil { + return nil, fmt.Errorf("no table found for type: %v", t.Name()) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// DynamicTableFor returns the *TableMap for the dynamic table corresponding +// to the input tablename +// If no table is mapped to that tablename an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) DynamicTableFor(tableName string, checkPK bool) (*TableMap, error) { + table, found := m.dynamicTableFind(tableName) + if !found { + return nil, fmt.Errorf("gorp: no table found for name: %v", tableName) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the returned statement. 
+// This is equivalent to running: Prepare() using database/sql +func (m *DbMap) Prepare(query string) (*sql.Stmt, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, nil) + } + return prepare(m, query) +} + +func tableOrNil(m *DbMap, t reflect.Type, name string) *TableMap { + if name != "" { + // Search by table name (dynamic tables) + if table, found := m.dynamicTableFind(name); found { + return table + } + return nil + } + + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + return table + } + } + return nil +} + +func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) { + ptrv := reflect.ValueOf(ptr) + if ptrv.Kind() != reflect.Ptr { + e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr, + ptrv.Kind()) + return nil, reflect.Value{}, errors.New(e) + } + elem := ptrv.Elem() + ifc := elem.Interface() + var t *TableMap + var err error + tableName := "" + if dyn, isDyn := ptr.(DynamicTable); isDyn { + tableName = dyn.TableName() + t, err = m.DynamicTableFor(tableName, checkPK) + } else { + etype := reflect.TypeOf(ifc) + t, err = m.TableFor(etype, checkPK) + } + + if err != nil { + return nil, reflect.Value{}, err + } + + return t, elem, nil +} + +func (m *DbMap) QueryRow(query string, args ...interface{}) *sql.Row { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return queryRow(m, query, args...) +} + +func (m *DbMap) Query(q string, args ...interface{}) (*sql.Rows, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&q, args...) + } + + if m.logger != nil { + now := time.Now() + defer m.trace(now, q, args...) + } + return query(m, q, args...) +} + +func (m *DbMap) trace(started time.Time, query string, args ...interface{}) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if m.logger != nil { + var margs = argsString(args...) + m.logger.Printf("%s%s [%s] (%v)", m.logPrefix, query, margs, (time.Now().Sub(started))) + } +} + +type stringer interface { + ToStringSlice() []string +} + +type numberer interface { + ToInt64Slice() []int64 +} + +func expandSliceArgs(query *string, args ...interface{}) { + for _, arg := range args { + mapper, ok := arg.(map[string]interface{}) + if !ok { + continue + } + + for key, value := range mapper { + var replacements []string + + // add flexibility for any custom type to be convert to one of the + // acceptable formats. 
+ if v, ok := value.(stringer); ok { + value = v.ToStringSlice() + } + if v, ok := value.(numberer); ok { + value = v.ToInt64Slice() + } + + switch v := value.(type) { + case []string: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint8: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint16: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int8: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int16: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []float32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []float64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + default: + continue + } + + if len(replacements) == 0 { + continue + } + + *query = strings.Replace(*query, fmt.Sprintf(":%s", key), strings.Join(replacements, ","), -1) + } + } +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect.go b/vendor/github.com/go-gorp/gorp/v3/dialect.go new file mode 100644 index 000000000000..fdea2b203cae --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect.go @@ -0,0 +1,105 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "reflect" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. 
For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. + AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to append to "create index" statement + CreateIndexSuffix() string + + // string to append to "drop index" statement + DropIndexSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existence clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). +type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. + InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +// TargetQueryInserter is implemented by dialects that can perform +// assignment of integer primary key type by executing a query +// like "select sequence.currval from dual". +type TargetQueryInserter interface { + // TargetQueryInserter runs an insert operation and assigns the + // automatically generated primary key retrived by the query + // extracted from the GeneratedIdQuery field of the id column. + InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) 
+ if err != nil { + return 0, err + } + return res.LastInsertId() +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go b/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go new file mode 100644 index 000000000000..d068ebe85e2f --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go @@ -0,0 +1,169 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +// Implementation of Dialect for MySQL databases. +type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + + /* == About varchar(N) == + * N is number of characters. + * A varchar column can store up to 65535 bytes. + * Remember that 1 character is 3 bytes in utf-8 charset. + * Also remember that each row can store up to 65535 bytes, + * and you have some overheads, so it's not possible for a + * varchar column to have 65535/3 characters really. + * So it would be better to use 'text' type in stead of + * large varchar type. + */ + if maxsize < 256 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." 
+ panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) CreateIndexSuffix() string { + return "using" +} + +func (d MySQLDialect) DropIndexSuffix() string { + return "on" +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +func (d MySQLDialect) SleepClause(s time.Duration) string { + return fmt.Sprintf("sleep(%f)", s.Seconds()) +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d MySQLDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go b/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go new file mode 100644 index 000000000000..01a99b8a1910 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go @@ -0,0 +1,139 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Oracle databases. 
+type OracleDialect struct{} + +func (d OracleDialect) QuerySuffix() string { return "" } + +func (d OracleDialect) CreateIndexSuffix() string { return "" } + +func (d OracleDialect) DropIndexSuffix() string { return "" } + +func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "NullTime", "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d OracleDialect) AutoIncrStr() string { + return "" +} + +func (d OracleDialect) AutoIncrBindValue() string { + return "NULL" +} + +func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d OracleDialect) CreateTableSuffix() string { + return "" +} + +func (d OracleDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d OracleDialect) BindVar(i int) string { + return fmt.Sprintf(":%d", i+1) +} + +// After executing the insert uses the ColMap IdQuery to get the generated id +func (d OracleDialect) InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error { + _, err := exec.Exec(insertSql, params...) + if err != nil { + return err + } + id, err := exec.SelectInt(idSql) + if err != nil { + return err + } + switch target.(type) { + case *int64: + *(target.(*int64)) = id + case *int32: + *(target.(*int32)) = int32(id) + case int: + *(target.(*int)) = int(id) + default: + return fmt.Errorf("Id field can be int, int32 or int64") + } + return nil +} + +func (d OracleDialect) QuoteField(f string) string { + return `"` + strings.ToUpper(f) + `"` +} + +func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go b/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go new file mode 100644 index 000000000000..7a9c50bf8ab0 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go @@ -0,0 +1,149 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type PostgresDialect struct { + suffix string + LowercaseFields bool +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time", "NullTime": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + d.QuoteField(col.ColumnName) +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) CreateIndexSuffix() string { + return "using" +} + +func (d PostgresDialect) DropIndexSuffix() string { + return "" +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +func (d PostgresDialect) SleepClause(s time.Duration) string { + return fmt.Sprintf("pg_sleep(%f)", s.Seconds()) +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.Query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err()) + } + if err := rows.Scan(target); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("more than two serial value returned for insert: %s", insertSql) + } + return rows.Err() +} + +func (d PostgresDialect) QuoteField(f string) string { + if d.LowercaseFields { + return `"` + strings.ToLower(f) + `"` + } + return `"` + f + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go b/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go new file mode 100644 index 000000000000..2e2cb89523e2 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go @@ -0,0 +1,152 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +type SnowflakeDialect struct { + suffix string + LowercaseFields bool +} + +func (d SnowflakeDialect) QuerySuffix() string { return ";" } + +func (d SnowflakeDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32: + + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "binary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time", "NullTime": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d SnowflakeDialect) AutoIncrStr() string { + return "" +} + +func (d SnowflakeDialect) AutoIncrBindValue() string { + return "default" +} + +func (d SnowflakeDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SnowflakeDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d SnowflakeDialect) CreateIndexSuffix() string { + return "" +} + +func (d SnowflakeDialect) DropIndexSuffix() string { + return "" +} + +func (d SnowflakeDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d SnowflakeDialect) BindVar(i int) string { + return "?" +} + +func (d SnowflakeDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.Query(insertSql, params...) 
+ if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err()) + } + if err := rows.Scan(target); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("more than two serial value returned for insert: %s", insertSql) + } + return rows.Err() +} + +func (d SnowflakeDialect) QuoteField(f string) string { + if d.LowercaseFields { + return `"` + strings.ToLower(f) + `"` + } + return `"` + f + `"` +} + +func (d SnowflakeDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d SnowflakeDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SnowflakeDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SnowflakeDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go new file mode 100644 index 000000000000..2296275b9516 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go @@ -0,0 +1,112 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" +) + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d SqliteDialect) CreateIndexSuffix() string { + return "" +} + +func (d SqliteDialect) DropIndexSuffix() string { + return "" +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go new file mode 100644 index 000000000000..ec06aed66fbc --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go @@ -0,0 +1,145 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Microsoft SQL Server databases. +// Use gorp.SqlServerDialect{"2005"} for legacy datatypes. +// Tested with driver: github.com/denisenkom/go-mssqldb + +type SqlServerDialect struct { + + // If set to "2005" legacy datatypes will be used + Version string +} + +func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "bit" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "smallint" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "int" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "bigint" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "numeric(20,0)" + case reflect.Float32: + return "float(24)" + case reflect.Float64: + return "float(53)" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "varbinary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "float(53)" + case "NullBool": + return "bit" + case "NullTime", "Time": + if d.Version == "2005" { + return "datetime" + } + return "datetime2" + } + + if maxsize < 1 { + if d.Version == "2005" { + maxsize = 255 + } else { + return fmt.Sprintf("nvarchar(max)") + } + } + return fmt.Sprintf("nvarchar(%d)", maxsize) +} + +// Returns auto_increment +func (d SqlServerDialect) AutoIncrStr() string { + return "identity(0,1)" +} + +// Empty string removes autoincrement columns from the INSERT statements. +func (d SqlServerDialect) AutoIncrBindValue() string { + return "" +} + +func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +func (d SqlServerDialect) CreateTableSuffix() string { return ";" } + +func (d SqlServerDialect) TruncateClause() string { + return "truncate table" +} + +// Returns "?" +func (d SqlServerDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqlServerDialect) QuoteField(f string) string { + return "[" + strings.Replace(f, "]", "]]", -1) + "]" +} + +func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + return d.QuoteField(schema) + "." + d.QuoteField(table) +} + +func (d SqlServerDialect) QuerySuffix() string { return ";" } + +func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { + s := fmt.Sprintf("if schema_id(N'%s') is null %s", schema, command) + return s +} + +func (d SqlServerDialect) IfTableExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", d.QuoteField(schema)) + } + s := fmt.Sprintf("if object_id('%s%s') is not null %s", schema_clause, d.QuoteField(table), command) + return s +} + +func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", schema) + } + s := fmt.Sprintf("if object_id('%s%s') is null %s", schema_clause, table, command) + return s +} + +func (d SqlServerDialect) CreateIndexSuffix() string { return "" } +func (d SqlServerDialect) DropIndexSuffix() string { return "" } diff --git a/vendor/github.com/go-gorp/gorp/v3/doc.go b/vendor/github.com/go-gorp/gorp/v3/doc.go new file mode 100644 index 000000000000..593f1c3c1260 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/doc.go @@ -0,0 +1,11 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp +package gorp diff --git a/vendor/github.com/go-gorp/gorp/v3/errors.go b/vendor/github.com/go-gorp/gorp/v3/errors.go new file mode 100644 index 000000000000..752ad1bca4c9 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/errors.go @@ -0,0 +1,31 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" +) + +// A non-fatal error, when a select query returns columns that do not exist +// as fields in the struct it is being mapped to +// TODO: discuss wether this needs an error. 
encoding/json silently ignores missing fields +type NoFieldInTypeError struct { + TypeName string + MissingColNames []string +} + +func (err *NoFieldInTypeError) Error() string { + return fmt.Sprintf("gorp: no fields %+v in type %s", err.MissingColNames, err.TypeName) +} + +// returns true if the error is non-fatal (ie, we shouldn't immediately return) +func NonFatalError(err error) bool { + switch err.(type) { + case *NoFieldInTypeError: + return true + default: + return false + } +} diff --git a/vendor/github.com/go-gorp/gorp/v3/gorp.go b/vendor/github.com/go-gorp/gorp/v3/gorp.go new file mode 100644 index 000000000000..fc6545676d18 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/gorp.go @@ -0,0 +1,672 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "strings" + "time" +) + +// OracleString (empty string is null) +// TODO: move to dialect/oracle?, rename to String? +type OracleString struct { + sql.NullString +} + +// Scan implements the Scanner interface. +func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// SqlTyper is a type that returns its database type. Most of the +// time, the type can just use "database/sql/driver".Valuer; but when +// it returns nil for its empty value, it needs to implement SqlTyper +// to have its column type detected properly during table creation. +type SqlTyper interface { + SqlType() driver.Value +} + +// legacySqlTyper prevents breaking clients who depended on the previous +// SqlTyper interface +type legacySqlTyper interface { + SqlType() driver.Valuer +} + +// for fields that exists in DB table, but not exists in struct +type dummyField struct{} + +// Scan implements the Scanner interface. +func (nt *dummyField) Scan(value interface{}) error { + return nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. + // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. + FromDb(target interface{}) (CustomScanner, bool) +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. 
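
For reviewers who have not used gorp's TypeConverter: it is the extension point the code in this file consults before binding values to and from the database. A minimal sketch of an implementation, assuming a hypothetical jsonConverter that persists map[string]string fields as JSON text (the converter name and types are illustrative, not part of this diff):

    import (
    	"encoding/json"
    	"errors"

    	gorp "github.com/go-gorp/gorp/v3"
    )

    type jsonConverter struct{}

    // ToDb runs before INSERT/UPDATE: serialize the map to a JSON string.
    func (jsonConverter) ToDb(val interface{}) (interface{}, error) {
    	if m, ok := val.(map[string]string); ok {
    		b, err := json.Marshal(m)
    		if err != nil {
    			return nil, err
    		}
    		return string(b), nil
    	}
    	return val, nil
    }

    // FromDb hands back a CustomScanner whose Binder unmarshals the column
    // into the target map after rows.Scan has filled the Holder.
    func (jsonConverter) FromDb(target interface{}) (gorp.CustomScanner, bool) {
    	if _, ok := target.(*map[string]string); ok {
    		binder := func(holder, target interface{}) error {
    			s, ok := holder.(*string)
    			if !ok {
    				return errors.New("FromDb: expected *string holder")
    			}
    			return json.Unmarshal([]byte(*s), target)
    		}
    		return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
    	}
    	return gorp.CustomScanner{}, false
    }

It is wired up by assigning dbmap.TypeConverter = jsonConverter{} before any queries run.
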
+type SqlExecutor interface { + WithContext(ctx context.Context) SqlExecutor + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +// DynamicTable allows the users of gorp to dynamically +// use different database table names during runtime +// while sharing the same golang struct for in-memory data +type DynamicTable interface { + TableName() string + SetTableName(string) +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. +var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +func argValue(a interface{}) interface{} { + v, ok := a.(driver.Valuer) + if !ok { + return a + } + vV := reflect.ValueOf(v) + if vV.Kind() == reflect.Ptr && vV.IsNil() { + return nil + } + ret, err := v.Value() + if err != nil { + return a + } + return ret +} + +func argsString(args ...interface{}) string { + var margs string + for i, a := range args { + v := argValue(a) + switch v.(type) { + case string: + v = fmt.Sprintf("%q", v) + default: + v = fmt.Sprintf("%v", v) + } + margs += fmt.Sprintf("%d:%s", i+1, v) + if i+1 < len(args) { + margs += " " + } + } + return margs +} + +// Calls the Exec function on the executor, but attempts to expand any eligible named +// query arguments first. +func maybeExpandNamedQueryAndExec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + dbMap := extractDbMap(e) + + if len(args) == 1 { + query, args = maybeExpandNamedQuery(dbMap, query, args) + } + + return exec(e, query, args...) +} + +func extractDbMap(e SqlExecutor) *DbMap { + switch m := e.(type) { + case *DbMap: + return m + case *Transaction: + return m.dbmap + } + return nil +} + +// executor exposes the sql.DB and sql.Tx functions so that it can be used +// on internal functions that need to be agnostic to the underlying object. 
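
DynamicTable, declared above, lets a single Go struct back several physical tables (per-tenant or per-period sharding). A sketch, under the assumption that the type was registered with DbMap.AddTableDynamic from elsewhere in this package (the auditLog type and table naming are illustrative):

    // auditLog carries its target table name at runtime.
    type auditLog struct {
    	tableName string // unexported: not mapped to a column
    	Id        int64  `db:"id"`
    	Message   string `db:"message"`
    }

    func (a *auditLog) TableName() string     { return a.tableName }
    func (a *auditLog) SetTableName(t string) { a.tableName = t }

    // Insert routes to audit_2025_01, audit_2025_02, ... based on month.
    func logFor(dbmap *gorp.DbMap, month, msg string) error {
    	return dbmap.Insert(&auditLog{tableName: "audit_" + month, Message: msg})
    }
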
+type executor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + QueryRow(query string, args ...interface{}) *sql.Row + Query(query string, args ...interface{}) (*sql.Rows, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func extractExecutorAndContext(e SqlExecutor) (executor, context.Context) { + switch m := e.(type) { + case *DbMap: + return m.Db, m.ctx + case *Transaction: + return m.tx, m.ctx + } + return nil, nil +} + +// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used +// as input to a named query. If so, it rewrites the query to use +// dialect-dependent bindvars and instantiates the corresponding slice of +// parameters by extracting data from the map / struct. +// If not, returns the input values unchanged. +func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + var ( + arg = args[0] + argval = reflect.ValueOf(arg) + ) + if argval.Kind() == reflect.Ptr { + argval = argval.Elem() + } + + if argval.Kind() == reflect.Map && argval.Type().Key().Kind() == reflect.String { + return expandNamedQuery(m, query, func(key string) reflect.Value { + return argval.MapIndex(reflect.ValueOf(key)) + }) + } + if argval.Kind() != reflect.Struct { + return query, args + } + if _, ok := arg.(time.Time); ok { + // time.Time is driver.Value + return query, args + } + if _, ok := arg.(driver.Valuer); ok { + // driver.Valuer will be converted to driver.Value. + return query, args + } + + return expandNamedQuery(m, query, argval.FieldByName) +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, name string, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t, name) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. 
all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + cArguments := strings.Split(field.Tag.Get("db"), ",") + fieldName = cArguments[0] + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. +func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +type foundTable struct { + table *TableMap + dynName *string +} + +func tableFor(m *DbMap, t reflect.Type, i interface{}) (*foundTable, error) { + if dyn, isDynamic := i.(DynamicTable); isDynamic { + tableName := dyn.TableName() + table, err := m.DynamicTableFor(tableName, true) + if err != nil { + return nil, err + } + return &foundTable{ + table: table, + dynName: &tableName, + }, nil + } + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + return &foundTable{table: table}, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + foundTable, err := tableFor(m, t, i) + if err != nil { + return nil, err + } + table := foundTable.table + + plan := table.bindGet() + + v := reflect.New(t) + if foundTable.dynName != nil { + retDyn := v.Interface().(DynamicTable) + retDyn.SetTableName(*foundTable.dynName) + } + + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + 
for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.QueryRow(plan.query, keys...) + err = row.Scan(dest...) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, colFilter ColumnFilter, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem, colFilter) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + if bi.versField != "" { + elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1) + } + + count += rows + + if v, ok := eval.(HasPostUpdate); ok { + err = v.PostUpdate(exec) + if err != nil { + return -1, err + } + } + } + return count, nil +} + +func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error { + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, false) + if err != nil { + return err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreInsert); ok { + err := v.PreInsert(exec) + if err != nil { + return err + } + } + + bi, err := table.bindInsert(elem) + if err != nil { + return err + } + + if bi.autoIncrIdx > -1 { + f := elem.FieldByName(bi.autoIncrFieldName) + switch inserter := m.Dialect.(type) { + case IntegerAutoIncrInserter: + id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...) 
+ if err != nil { + return err + } + k := f.Kind() + if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) { + f.SetInt(id) + } else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) { + f.SetUint(uint64(id)) + } else { + return fmt.Errorf("gorp: cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName) + } + case TargetedAutoIncrInserter: + err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + case TargetQueryInserter: + var idQuery = table.ColMap(bi.autoIncrFieldName).GeneratedIdQuery + if idQuery == "" { + return fmt.Errorf("gorp: cannot set %s value if its ColumnMap.GeneratedIdQuery is empty", bi.autoIncrFieldName) + } + err := inserter.InsertQueryToTarget(exec, bi.query, idQuery, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + default: + return fmt.Errorf("gorp: cannot use autoincrement fields on dialects that do not implement an autoincrementing interface") + } + } else { + _, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return err + } + } + + if v, ok := eval.(HasPostInsert); ok { + err := v.PostInsert(exec) + if err != nil { + return err + } + } + } + return nil +} + +func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.ExecContext(ctx, query, args...) + } + + return executor.Exec(query, args...) +} + +func prepare(e SqlExecutor, query string) (*sql.Stmt, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.PrepareContext(ctx, query) + } + + return executor.Prepare(query) +} + +func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryRowContext(ctx, query, args...) + } + + return executor.QueryRow(query, args...) +} + +func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryContext(ctx, query, args...) + } + + return executor.Query(query, args...) +} + +func begin(m *DbMap) (*sql.Tx, error) { + if m.ctx != nil { + return m.Db.BeginTx(m.ctx, nil) + } + + return m.Db.Begin() +} diff --git a/vendor/github.com/go-gorp/gorp/v3/hooks.go b/vendor/github.com/go-gorp/gorp/v3/hooks.go new file mode 100644 index 000000000000..1e80bca7ee87 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/hooks.go @@ -0,0 +1,42 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +//++ TODO v2-phase3: HasPostGet => PostGetter, HasPostDelete => PostDeleter, etc. + +// HasPostGet provides PostGet() which will be executed after the GET statement. 
+type HasPostGet interface {
+	PostGet(SqlExecutor) error
+}
+
+// HasPostDelete provides PostDelete() which will be executed after the DELETE statement.
+type HasPostDelete interface {
+	PostDelete(SqlExecutor) error
+}
+
+// HasPostUpdate provides PostUpdate() which will be executed after the UPDATE statement.
+type HasPostUpdate interface {
+	PostUpdate(SqlExecutor) error
+}
+
+// HasPostInsert provides PostInsert() which will be executed after the INSERT statement.
+type HasPostInsert interface {
+	PostInsert(SqlExecutor) error
+}
+
+// HasPreDelete provides PreDelete() which will be executed before the DELETE statement.
+type HasPreDelete interface {
+	PreDelete(SqlExecutor) error
+}
+
+// HasPreUpdate provides PreUpdate() which will be executed before the UPDATE statement.
+type HasPreUpdate interface {
+	PreUpdate(SqlExecutor) error
+}
+
+// HasPreInsert provides PreInsert() which will be executed before the INSERT statement.
+type HasPreInsert interface {
+	PreInsert(SqlExecutor) error
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/index.go b/vendor/github.com/go-gorp/gorp/v3/index.go
new file mode 100644
index 000000000000..df1cf5520528
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/index.go
@@ -0,0 +1,49 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+// IndexMap represents a mapping between a Go struct field and a single
+// index in a table.
+// Unique and MaxSize only inform the
+// CreateTables() function and are not used by Insert/Update/Delete/Get.
+type IndexMap struct {
+	// Index name in db table
+	IndexName string
+
+	// If true, " unique" is added to create index statements.
+	// Not used elsewhere
+	Unique bool
+
+	// Index type supported by Dialect
+	// Postgres: B-tree, Hash, GiST and GIN.
+	// Mysql: Btree, Hash.
+	// Sqlite: nil.
+	IndexType string
+
+	// Column names for single- and multi-column indexes
+	columns []string
+}
+
+// Rename allows you to specify the index name in the table
+//
+// Example: table.IdxMap("customer_test_idx").Rename("customer_idx")
+//
+func (idx *IndexMap) Rename(indname string) *IndexMap {
+	idx.IndexName = indname
+	return idx
+}
+
+// SetUnique adds "unique" to the create index statements for this
+// index, if b is true.
+func (idx *IndexMap) SetUnique(b bool) *IndexMap {
+	idx.Unique = b
+	return idx
+}
+
+// SetIndexType specifies the index type supported by the chosen SQL Dialect
+func (idx *IndexMap) SetIndexType(indtype string) *IndexMap {
+	idx.IndexType = indtype
+	return idx
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/lockerror.go b/vendor/github.com/go-gorp/gorp/v3/lockerror.go
new file mode 100644
index 000000000000..7e81891e2b4f
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/lockerror.go
@@ -0,0 +1,56 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
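
The hook interfaces above are detected by type assertion on each entity at Insert/Update/Delete/Get time, so opting in is purely a matter of method sets. A sketch of a PreInsert hook that stamps a creation time (the post type and its columns are illustrative):

    import (
    	"time"

    	gorp "github.com/go-gorp/gorp/v3"
    )

    type post struct {
    	Id      int64  `db:"id"`
    	Title   string `db:"title"`
    	Created int64  `db:"created"`
    }

    // PreInsert satisfies HasPreInsert; returning a non-nil error aborts the insert.
    func (p *post) PreInsert(s gorp.SqlExecutor) error {
    	p.Created = time.Now().UnixNano()
    	return nil
    }
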
+ +package gorp + +import ( + "fmt" + "reflect" +) + +// OptimisticLockError is returned by Update() or Delete() if the +// struct being modified has a Version field and the value is not equal to +// the current value in the database +type OptimisticLockError struct { + // Table name where the lock error occurred + TableName string + + // Primary key values of the row being updated/deleted + Keys []interface{} + + // true if a row was found with those keys, indicating the + // LocalVersion is stale. false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +func lockError(m *DbMap, exec SqlExecutor, tableName string, + existingVer int64, elem reflect.Value, + keys ...interface{}) (int64, error) { + + existing, err := get(m, exec, elem.Interface(), keys...) + if err != nil { + return -1, err + } + + ole := OptimisticLockError{tableName, keys, true, existingVer} + if existing == nil { + ole.RowExists = false + } + return -1, ole +} diff --git a/vendor/github.com/go-gorp/gorp/v3/logging.go b/vendor/github.com/go-gorp/gorp/v3/logging.go new file mode 100644 index 000000000000..e8cba3db2cd9 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/logging.go @@ -0,0 +1,42 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import "fmt" + +// GorpLogger is a deprecated alias of Logger. +type GorpLogger = Logger + +// Logger is the type that gorp uses to log SQL statements. +// See DbMap.TraceOn. +type Logger interface { + Printf(format string, v ...interface{}) +} + +// TraceOn turns on SQL statement logging for this DbMap. After this is +// called, all SQL statements will be sent to the logger. If prefix is +// a non-empty string, it will be written to the front of all logged +// strings, which can aid in filtering log lines. +// +// Use TraceOn if you want to spy on the SQL statements that gorp +// generates. +// +// Note that the base log.Logger type satisfies Logger, but adapters can +// easily be written for other logging packages (e.g., the golang-sanctioned +// glog framework). +func (m *DbMap) TraceOn(prefix string, logger Logger) { + m.logger = logger + if prefix == "" { + m.logPrefix = prefix + } else { + m.logPrefix = fmt.Sprintf("%s ", prefix) + } +} + +// TraceOff turns off tracing. It is idempotent. +func (m *DbMap) TraceOff() { + m.logger = nil + m.logPrefix = "" +} diff --git a/vendor/github.com/go-gorp/gorp/v3/nulltypes.go b/vendor/github.com/go-gorp/gorp/v3/nulltypes.go new file mode 100644 index 000000000000..87b21f83fa12 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/nulltypes.go @@ -0,0 +1,65 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
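
To make the optimistic-locking flow above concrete: once a version column is configured, a stale Update or Delete surfaces as the OptimisticLockError built by lockError. A sketch, assuming a product struct whose table was mapped with SetKeys(true, "Id") and SetVersionCol("Version"):

    count, err := dbmap.Update(&p) // p.Version no longer matches the row
    if err != nil {
    	if ole, ok := err.(gorp.OptimisticLockError); ok {
    		if ole.RowExists {
    			// another writer bumped the version: reload p and retry
    		} else {
    			// the row was deleted (or never inserted): nothing to retry
    		}
    	}
    	return err
    }
    _ = count
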
+ +package gorp + +import ( + "database/sql/driver" + "log" + "time" +) + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + log.Printf("Time scan value is: %#v", value) + switch t := value.(type) { + case time.Time: + nt.Time, nt.Valid = t, true + case []byte: + v := strToTime(string(t)) + if v != nil { + nt.Valid = true + nt.Time = *v + } + case string: + v := strToTime(t) + if v != nil { + nt.Valid = true + nt.Time = *v + } + } + return nil +} + +func strToTime(v string) *time.Time { + for _, dtfmt := range []string{ + "2006-01-02 15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", + "2006-01-02 15:04:05-07:00", + } { + if t, err := time.Parse(dtfmt, v); err == nil { + return &t + } + } + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/go-gorp/gorp/v3/select.go b/vendor/github.com/go-gorp/gorp/v3/select.go new file mode 100644 index 000000000000..2d2d59618680 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/select.go @@ -0,0 +1,359 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "database/sql" + "fmt" + "reflect" +) + +// SelectInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, zero is returned. +func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) { + var h int64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) { + var h sql.NullInt64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, zero is returned. +func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) { + var h float64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) { + var h sql.NullFloat64 + err := selectVal(e, &h, query, args...) 
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectStr executes the given query, which should be a SELECT statement for a single
+// char/varchar column, and returns the value of the first row returned. If no rows are
+// found, an empty string is returned.
+func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) {
+	var h string
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return "", err
+	}
+	return h, nil
+}
+
+// SelectNullStr executes the given query, which should be a SELECT
+// statement for a single char/varchar column, and returns the value
+// of the first row returned. If no rows are found, the empty
+// sql.NullString is returned.
+func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) {
+	var h sql.NullString
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectOne executes the given query (which should be a SELECT statement)
+// and binds the result to holder, which must be a pointer.
+//
+// If no row is found, an error (sql.ErrNoRows specifically) will be returned.
+//
+// If more than one row is found, an error will be returned.
+//
+func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	t := reflect.TypeOf(holder)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	} else {
+		return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %T", holder)
+	}
+
+	// Handle pointer to pointer
+	isptr := false
+	if t.Kind() == reflect.Ptr {
+		isptr = true
+		t = t.Elem()
+	}
+
+	if t.Kind() == reflect.Struct {
+		var nonFatalErr error
+
+		list, err := hookedselect(m, e, holder, query, args...)
+		if err != nil {
+			if !NonFatalError(err) {
+				return err
+			}
+			nonFatalErr = err
+		}
+
+		dest := reflect.ValueOf(holder)
+		if isptr {
+			dest = dest.Elem()
+		}
+
+		if list != nil && len(list) > 0 {
+			// check for multiple rows
+			if len(list) > 1 {
+				return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args)
+			}
+
+			// Initialize if nil
+			if dest.IsNil() {
+				dest.Set(reflect.New(t))
+			}
+
+			// only one row found
+			src := reflect.ValueOf(list[0])
+			dest.Elem().Set(src.Elem())
+		} else {
+			// No rows found, return a proper error.
+			return sql.ErrNoRows
+		}
+
+		return nonFatalErr
+	}
+
+	return selectVal(e, holder, query, args...)
+}
+
+func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	if len(args) == 1 {
+		switch m := e.(type) {
+		case *DbMap:
+			query, args = maybeExpandNamedQuery(m, query, args)
+		case *Transaction:
+			query, args = maybeExpandNamedQuery(m.dbmap, query, args)
+		}
+	}
+	rows, err := e.Query(query, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		if err := rows.Err(); err != nil {
+			return err
+		}
+		return sql.ErrNoRows
+	}
+
+	return rows.Scan(holder)
+}
+
+func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
+	args ...interface{}) ([]interface{}, error) {
+
+	var nonFatalErr error
+
+	list, err := rawselect(m, exec, i, query, args...)
+ if err != nil { + if !NonFatalError(err) { + return nil, err + } + nonFatalErr = err + } + + // Determine where the results are: written to i, or returned in list + if t, _ := toSliceType(i); t == nil { + for _, v := range list { + if v, ok := v.(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } else { + resultsValue := reflect.Indirect(reflect.ValueOf(i)) + for i := 0; i < resultsValue.Len(); i++ { + if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } + return list, nonFatalErr +} + +func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + var ( + appendToSlice = false // Write results to i directly? + intoStruct = true // Selecting into a struct? + pointerElements = true // Are the slice elements pointers (vs values)? + ) + + var nonFatalErr error + + tableName := "" + var dynObj DynamicTable + isDynamic := false + if dynObj, isDynamic = i.(DynamicTable); isDynamic { + tableName = dynObj.TableName() + } + + // get type for i, verifying it's a supported destination + t, err := toType(i) + if err != nil { + var err2 error + if t, err2 = toSliceType(i); t == nil { + if err2 != nil { + return nil, err2 + } + return nil, err + } + pointerElements = t.Kind() == reflect.Ptr + if pointerElements { + t = t.Elem() + } + appendToSlice = true + intoStruct = t.Kind() == reflect.Struct + } + + // If the caller supplied a single struct/map argument, assume a "named + // parameter" query. Extract the named arguments from the struct/map, create + // the flat arg slice, and rewrite the query to use the dialect's placeholder. + if len(args) == 1 { + query, args = maybeExpandNamedQuery(m, query, args) + } + + // Run the query + rows, err := exec.Query(query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + // Fetch the column names as returned from db + cols, err := rows.Columns() + if err != nil { + return nil, err + } + + if !intoStruct && len(cols) > 1 { + return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols)) + } + + var colToFieldIndex [][]int + if intoStruct { + colToFieldIndex, err = columnToFieldIndex(m, t, tableName, cols) + if err != nil { + if !NonFatalError(err) { + return nil, err + } + nonFatalErr = err + } + } + + conv := m.TypeConverter + + // Add results to one of these two slices. + var ( + list = make([]interface{}, 0) + sliceValue = reflect.Indirect(reflect.ValueOf(i)) + ) + + for { + if !rows.Next() { + // if error occured return rawselect + if rows.Err() != nil { + return nil, rows.Err() + } + // time to exit from outer "for" loop + break + } + v := reflect.New(t) + + if isDynamic { + v.Interface().(DynamicTable).SetTableName(tableName) + } + + dest := make([]interface{}, len(cols)) + + custScan := make([]CustomScanner, 0) + + for x := range cols { + f := v.Elem() + if intoStruct { + index := colToFieldIndex[x] + if index == nil { + // this field is not present in the struct, so create a dummy + // value for rows.Scan to scan into + var dummy dummyField + dest[x] = &dummy + continue + } + f = f.FieldByIndex(index) + } + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + err = rows.Scan(dest...) 
+ if err != nil { + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if appendToSlice { + if !pointerElements { + v = v.Elem() + } + sliceValue.Set(reflect.Append(sliceValue, v)) + } else { + list = append(list, v.Interface()) + } + } + + if appendToSlice && sliceValue.IsNil() { + sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0)) + } + + return list, nonFatalErr +} diff --git a/vendor/github.com/go-gorp/gorp/v3/table.go b/vendor/github.com/go-gorp/gorp/v3/table.go new file mode 100644 index 000000000000..5931b2d9eb04 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/table.go @@ -0,0 +1,258 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. + TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + indexes []*IndexMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. +func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. +// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0, len(fieldNames)) + for _, name := range fieldNames { + columns = append(columns, name) + } + + for _, existingColumns := range t.uniqueTogether { + if equal(existingColumns, columns) { + return t + } + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. 
It panics if the struct does not contain a field matching this +// name. +func (t *TableMap) ColMap(field string) *ColumnMap { + col := colMapOrNil(t, field) + if col == nil { + e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", + t.TableName, t.gotype.Name(), field) + + panic(e) + } + return col +} + +func colMapOrNil(t *TableMap, field string) *ColumnMap { + for _, col := range t.Columns { + if col.fieldName == field || col.ColumnName == field { + return col + } + } + return nil +} + +// IdxMap returns the IndexMap pointer matching the given index name. +func (t *TableMap) IdxMap(field string) *IndexMap { + for _, idx := range t.indexes { + if idx.IndexName == field { + return idx + } + } + return nil +} + +// AddIndex registers the index with gorp for specified table with given parameters. +// This operation is idempotent. If index is already mapped, the +// existing *IndexMap is returned +// Function will panic if one of the given for index columns does not exists +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +func (t *TableMap) AddIndex(name string, idxtype string, columns []string) *IndexMap { + // check if we have a index with this name already + for _, idx := range t.indexes { + if idx.IndexName == name { + return idx + } + } + for _, icol := range columns { + if res := t.ColMap(icol); res == nil { + e := fmt.Sprintf("No ColumnName in table %s to create index on", t.TableName) + panic(e) + } + } + + idx := &IndexMap{IndexName: name, Unique: false, IndexType: idxtype, columns: columns} + t.indexes = append(t.indexes, idx) + t.ResetSql() + return idx +} + +// SetVersionCol sets the column to use as the Version field. By default +// the "Version" field is used. Returns the column found, or panics +// if the struct does not contain a field matching this name. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. 
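
Taken together, the builders above (SetKeys, SetUniqueTogether, AddIndex, plus SetVersionCol defined next) are normally chained once at startup. A sketch, with the account struct and all names being assumptions for illustration:

    t := dbmap.AddTableWithName(account{}, "accounts")
    t.SetKeys(true, "Id")                 // auto-increment primary key
    t.SetUniqueTogether("OrgId", "Email") // composite unique constraint
    t.AddIndex("accounts_org_idx", "Btree", []string{"OrgId"})
    t.SetVersionCol("Version")            // enables optimistic locking
    if err := dbmap.CreateTablesIfNotExists(); err != nil {
    	return err
    }
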
+func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +// SqlForCreateTable gets a sequence of SQL commands that will create +// the specified table and any associated schema +func (t *TableMap) SqlForCreate(ifNotExists bool) string { + s := bytes.Buffer{} + dialect := t.dbmap.Dialect + + if strings.TrimSpace(t.SchemaName) != "" { + schemaCreate := "create schema" + if ifNotExists { + s.WriteString(dialect.IfSchemaNotExists(schemaCreate, t.SchemaName)) + } else { + s.WriteString(schemaCreate) + } + s.WriteString(fmt.Sprintf(" %s;", t.SchemaName)) + } + + tableCreate := "create table" + if ifNotExists { + s.WriteString(dialect.IfTableNotExists(tableCreate, t.SchemaName, t.TableName)) + } else { + s.WriteString(tableCreate) + } + s.WriteString(fmt.Sprintf(" %s (", dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(", ") + } + stype := dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr) + s.WriteString(fmt.Sprintf("%s %s", dialect.QuoteField(col.ColumnName), stype)) + + if col.isPK || col.isNotNull { + s.WriteString(" not null") + } + if col.isPK && len(t.keys) == 1 { + s.WriteString(" primary key") + } + if col.Unique { + s.WriteString(" unique") + } + if col.isAutoIncr { + s.WriteString(fmt.Sprintf(" %s", dialect.AutoIncrStr())) + } + + x++ + } + } + if len(t.keys) > 1 { + s.WriteString(", primary key (") + for x := range t.keys { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(t.keys[x].ColumnName)) + } + s.WriteString(")") + } + if len(t.uniqueTogether) > 0 { + for _, columns := range t.uniqueTogether { + s.WriteString(", unique (") + for i, column := range columns { + if i > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(column)) + } + s.WriteString(")") + } + } + s.WriteString(") ") + s.WriteString(dialect.CreateTableSuffix()) + s.WriteString(dialect.QuerySuffix()) + return s.String() +} + +func equal(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/go-gorp/gorp/v3/table_bindings.go b/vendor/github.com/go-gorp/gorp/v3/table_bindings.go new file mode 100644 index 000000000000..43c0b3dd4880 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/table_bindings.go @@ -0,0 +1,305 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "sync" +) + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. 
+ Binder func(holder interface{}, target interface{}) error +} + +// Used to filter columns when selectively updating +type ColumnFilter func(*ColumnMap) bool + +func acceptAllFilter(col *ColumnMap) bool { + return true +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string + once sync.Once +} + +func (plan *bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := &t.insertPlan + plan.once.Do(func() { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + if col.DefaultValue == "" { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } else { + s2.WriteString(col.DefaultValue) + } + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value, colFilter ColumnFilter) (bindInstance, error) { + if colFilter == nil { + colFilter = acceptAllFilter + } + + plan := &t.updatePlan + plan.once.Do(func() { + s := bytes.Buffer{} + 
s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient && colFilter(col) { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := &t.deletePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() *bindPlan { + plan := &t.getPlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan +} diff --git a/vendor/github.com/go-gorp/gorp/v3/test_all.sh b/vendor/github.com/go-gorp/gorp/v3/test_all.sh new file mode 
100644 index 000000000000..91007d645408 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/test_all.sh @@ -0,0 +1,23 @@ +#!/bin/bash -ex + +# on macs, you may need to: +# export GOBUILDFLAG=-ldflags -linkmode=external + +echo "Running unit tests" +go test -race + +echo "Testing against postgres" +export GORP_TEST_DSN="host=postgres user=gorptest password=gorptest dbname=gorptest sslmode=disable" +export GORP_TEST_DIALECT=postgres +go test -tags integration $GOBUILDFLAG $@ . + +echo "Testing against sqlite" +export GORP_TEST_DSN=/tmp/gorptest.bin +export GORP_TEST_DIALECT=sqlite +go test -tags integration $GOBUILDFLAG $@ . +rm -f /tmp/gorptest.bin + +echo "Testing against mysql" +export GORP_TEST_DSN="gorptest:gorptest@tcp(mysql)/gorptest" +export GORP_TEST_DIALECT=mysql +go test -tags integration $GOBUILDFLAG $@ . diff --git a/vendor/github.com/go-gorp/gorp/v3/transaction.go b/vendor/github.com/go-gorp/gorp/v3/transaction.go new file mode 100644 index 000000000000..d505d94c914d --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/transaction.go @@ -0,0 +1,239 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "context" + "database/sql" + "time" +) + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + ctx context.Context + dbmap *DbMap + tx *sql.Tx + closed bool +} + +func (t *Transaction) WithContext(ctx context.Context) SqlExecutor { + copy := &Transaction{} + *copy = *t + copy.ctx = ctx + return copy +} + +// Insert has the same behavior as DbMap.Insert(), but runs in a transaction. +func (t *Transaction) Insert(list ...interface{}) error { + return insert(t.dbmap, t, list...) +} + +// Update had the same behavior as DbMap.Update(), but runs in a transaction. +func (t *Transaction) Update(list ...interface{}) (int64, error) { + return update(t.dbmap, t, nil, list...) +} + +// UpdateColumns had the same behavior as DbMap.UpdateColumns(), but runs in a transaction. +func (t *Transaction) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) { + return update(t.dbmap, t, filter, list...) +} + +// Delete has the same behavior as DbMap.Delete(), but runs in a transaction. +func (t *Transaction) Delete(list ...interface{}) (int64, error) { + return delete(t.dbmap, t, list...) +} + +// Get has the same behavior as DbMap.Get(), but runs in a transaction. +func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(t.dbmap, t, i, keys...) +} + +// Select has the same behavior as DbMap.Select(), but runs in a transaction. +func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return hookedselect(t.dbmap, t, i, query, args...) +} + +// Exec has the same behavior as DbMap.Exec(), but runs in a transaction. +func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, args...) + } + return maybeExpandNamedQueryAndExec(t, query, args...) 
+} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function. +func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectInt(t, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function. +func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullInt(t, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function. +func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectFloat(t, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. +func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullFloat(t, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function. +func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectStr(t, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. +func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullStr(t, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function. +func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectOne(t.dbmap, t, holder, query, args...) +} + +// Commit commits the underlying database transaction. +func (t *Transaction) Commit() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "commit;") + } + return t.tx.Commit() + } + + return sql.ErrTxDone +} + +// Rollback rolls back the underlying database transaction. +func (t *Transaction) Rollback() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "rollback;") + } + return t.tx.Rollback() + } + + return sql.ErrTxDone +} + +// Savepoint creates a savepoint with the given name. The name is interpolated +// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is +// derived from user input. +func (t *Transaction) Savepoint(name string) error { + query := "savepoint " + t.dbmap.Dialect.QuoteField(name) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// RollbackToSavepoint rolls back to the savepoint with the given name. The +// name is interpolated directly into the SQL SAVEPOINT statement, so you must +// sanitize it if it is derived from user input. 
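A rough usage sketch of the transaction and savepoint API above; the `Invoice` type, its table mapping, and the savepoint name are illustrative assumptions, not part of this change:

```go
package example

import "github.com/go-gorp/gorp/v3"

// Invoice is a hypothetical struct assumed to be registered on the
// DbMap (e.g. via AddTableWithName) before this function is called.
type Invoice struct {
	ID   int64
	Memo string
}

func insertTwo(dbmap *gorp.DbMap, a, b *Invoice) error {
	tx, err := dbmap.Begin()
	if err != nil {
		return err
	}
	if err := tx.Insert(a); err != nil {
		tx.Rollback()
		return err
	}
	// The savepoint lets us undo only the second insert while
	// keeping the first one and still committing the transaction.
	if err := tx.Savepoint("second"); err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Insert(b); err != nil {
		if rbErr := tx.RollbackToSavepoint("second"); rbErr != nil {
			tx.Rollback()
			return rbErr
		}
	}
	return tx.Commit()
}
```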
+func (t *Transaction) RollbackToSavepoint(savepoint string) error { + query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// ReleaseSavepoint releases the savepoint with the given name. The name is +// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize +// it if it is derived from user input. +func (t *Transaction) ReleaseSavepoint(savepoint string) error { + query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction. +func (t *Transaction) Prepare(query string) (*sql.Stmt, error) { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + return prepare(t, query) +} + +func (t *Transaction) QueryRow(query string, args ...interface{}) *sql.Row { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, args...) + } + return queryRow(t, query, args...) +} + +func (t *Transaction) Query(q string, args ...interface{}) (*sql.Rows, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&q, args...) + } + + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, q, args...) + } + return query(t, q, args...) +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 000000000000..b4ae623be552 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 000000000000..e8a276826cf2 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.5.3 + +script: + - go test -v ./... diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 000000000000..9d4735cad9f4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 000000000000..804cf22e6460 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! /bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 000000000000..02e7de80a0b0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, 
matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m != nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+ } + + if len(next) == len(matchers) { + return next + } + + return minimizeMatchers(next) +} + +// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree +func minimizeTree(tree *ast.Node) *ast.Node { + switch tree.Kind { + case ast.KindAnyOf: + return minimizeTreeAnyOf(tree) + default: + return nil + } +} + +// minimizeTreeAnyOf tries to find common children of the given AnyOf pattern node. +// It searches for common children from the left and from the right; +// if any common children are found, it returns a new optimized ast tree, +// else it returns nil. +func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { + if !areOfSameKind(tree.Children, ast.KindPattern) { + return nil + } + + commonLeft, commonRight := commonChildren(tree.Children) + commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) + if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts + return nil + } + + var result []*ast.Node + if commonLeftCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) + } + + var anyOf []*ast.Node + for _, child := range tree.Children { + reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] + var node *ast.Node + if len(reuse) == 0 { + // this pattern is completely reduced by the commonLeft and commonRight patterns, + // so it becomes nothing + node = ast.NewNode(ast.KindNothing, nil) + } else { + node = ast.NewNode(ast.KindPattern, nil, reuse...) + } + anyOf = appendIfUnique(anyOf, node) + } + switch { + case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: + result = append(result, anyOf[0]) + case len(anyOf) > 1: + result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) + } + + if commonRightCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) + } + + return ast.NewNode(ast.KindPattern, nil, result...)
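For illustration, a behavioral sketch of what this minimization enables; the pattern below is made up, and the factoring itself is internal, visible only through `Match` results:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Both alternatives share a trailing "*" child, which
	// minimizeTreeAnyOf can hoist out of the AnyOf node, so the
	// branch only has to distinguish the literal prefixes.
	g := glob.MustCompile("{https://*,http://*}")
	fmt.Println(g.Match("https://example.com")) // true
	fmt.Println(g.Match("ftp://example.com"))   // false
}
```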
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 000000000000..2afde343af83 --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string { + b := make([]byte, 2*len(s)) + + // a byte loop is correct because all meta characters are ASCII + j := 0 + for i := 0; i < len(s); i++ { + if syntax.Special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + + return string(b[0:j]) +} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go new file mode 100644 index 000000000000..514a9a5c450a --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/strings" +) + +type Any struct { + Separators []rune +} + +func NewAny(s []rune) Any { + return Any{s} +} + +func (self Any) Match(s string) bool { + return strings.IndexAnyRunes(s, self.Separators) == -1 +} + +func (self Any) Index(s string) (int, []int) { + found := strings.IndexAnyRunes(s, self.Separators) + switch found { + case -1: + case 0: + return 0, segments0 + default: + s = s[:found] + } + + segments := acquireSegments(len(s)) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Any) Len() int { + return lenNo +} + +func (self Any) String() string { + return fmt.Sprintf("<any:![%s]>", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go new file mode 100644 index 000000000000..8e65356cdc9f --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any_of.go @@ -0,0 +1,82 @@ +package match + +import "fmt" + +type AnyOf struct { + Matchers Matchers +} + +func NewAnyOf(m ...Matcher) AnyOf { + return AnyOf{Matchers(m)} +} + +func (self *AnyOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self AnyOf) Match(s string) bool { + for _, m := range self.Matchers { + if m.Match(s) { + return true + } + } + + return false +} + +func (self AnyOf) Index(s string) (int, []int) { + index := -1 + + segments := acquireSegments(len(s)) + for _, m := range self.Matchers { + idx, seg := m.Index(s) + if idx == -1 { + continue + } + + if index == -1 || idx < index { + index = idx + segments = append(segments[:0], seg...)
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("<any_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 000000000000..a8130e93eae5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,146 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (int, []int) { + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + + // self.LengthRunes, self.RightLengthRunes and self.LeftLengthRunes are rune counts for each part; + // here we manipulate byte lengths for better optimization, + // but these checks still work, since the minimum byte length of a 1-rune string is 1 byte.
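+ // Illustration (not from upstream): for NewBTree(NewText("cd"), NewText("ab"), NewText("ef")), + // LeftLengthRunes and RightLengthRunes are both 2, so for the six-byte input "abcdef" + // the Value matcher is only searched inside s[2:4], i.e. "cd".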
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen { + return false + } + + // try to cut unnecessary parts + // by knowledge of length of right and left part + var offset, limit int + if self.LeftLengthRunes >= 0 { + offset = self.LeftLengthRunes + } + if self.RightLengthRunes >= 0 { + limit = inputLen - self.RightLengthRunes + } else { + limit = inputLen + } + + for offset < limit { + // search for matching part in substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := utf8.DecodeRuneInString(s[offset+index:]) + offset += index + step + + releaseSegments(segments) + } + + return false +} + +func (self BTree) String() string { + const n string = "<nil>" + var l, r string + if self.Left == nil { + l = n + } else { + l = self.Left.String() + } + if self.Right == nil { + r = n + } else { + r = self.Right.String() + } + + return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r) +} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go new file mode 100644 index 000000000000..0998e95b0eaf --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/contains.go @@ -0,0 +1,58 @@ +package match + +import ( + "fmt" + "strings" +) + +type Contains struct { + Needle string + Not bool +} + +func NewContains(needle string, not bool) Contains { + return Contains{needle, not} +} + +func (self Contains) Match(s string) bool { + return strings.Contains(s, self.Needle) != self.Not +} + +func (self Contains) Index(s string) (int, []int) { + var offset int + + idx := strings.Index(s, self.Needle) + + if !self.Not { + if idx == -1 { + return -1, nil + } + + offset = idx + len(self.Needle) + if len(s) <= offset { + return 0, []int{offset} + } + s = s[offset:] + } else if idx != -1 { + s = s[:idx] + } + + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, offset+i) + } + + return 0, append(segments, offset+len(s)) +} + +func (self Contains) Len() int { + return lenNo +} + +func (self Contains) String() string { + var not string + if self.Not { + not = "!"
+ } + return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle) +} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go new file mode 100644 index 000000000000..7c968ee368b0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/every_of.go @@ -0,0 +1,99 @@ +package match + +import ( + "fmt" +) + +type EveryOf struct { + Matchers Matchers +} + +func NewEveryOf(m ...Matcher) EveryOf { + return EveryOf{Matchers(m)} +} + +func (self *EveryOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self EveryOf) Len() (l int) { + for _, m := range self.Matchers { + if ml := m.Len(); l > 0 { + l += ml + } else { + return -1 + } + } + + return +} + +func (self EveryOf) Index(s string) (int, []int) { + var index int + var offset int + + // make `in` with cap as len(s), + // cause it is the maximum size of output segments values + next := acquireSegments(len(s)) + current := acquireSegments(len(s)) + + sub := s + for i, m := range self.Matchers { + idx, seg := m.Index(sub) + if idx == -1 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + if i == 0 { + // we use copy here instead of `current = seg` + // cause seg is a slice from reusable buffer `in` + // and it could be overwritten in next iteration + current = append(current, seg...) + } else { + // clear the next + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("<every_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 000000000000..7fd763ecd8ea --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!"
+ } + + return fmt.Sprintf("<list:%s[%s]>", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 000000000000..f80e007fb838 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) + + return target +} + +func reverseSegments(input []int) { + l := len(input) + m := l / 2 + + for i := 0; i < m; i++ { + input[i], input[l-i-1] = input[l-i-1], input[i] + } +} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go new file mode 100644 index 000000000000..d72f69efff75 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/max.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Max struct { + Limit int +} + +func NewMax(l int) Max { + return Max{l} +} + +func (self Max) Match(s string) bool { + var l int + for range s { + l += 1 + if l > self.Limit { + return false + } + } + + return true +} + +func (self Max) Index(s string) (int, []int) { + segments := acquireSegments(self.Limit + 1) + segments = append(segments, 0) + var count int + for i, r := range s { + count++ + if count > self.Limit { + break + } + segments = append(segments, i+utf8.RuneLen(r)) + } + + return 0, segments +} + +func (self Max) Len() int { + return lenNo +} + +func (self Max) String() string { + return fmt.Sprintf("<max:%d>", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go new file mode 100644 index 000000000000..db57ac8eb49d --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/min.go @@ -0,0 +1,57 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Min struct { + Limit int +} + +func NewMin(l int) Min { + return Min{l} +} + +func (self Min) Match(s string) bool { + var l int + for range s { + l += 1 + if l >= self.Limit { + return true + } + } + + return false +} + +func (self Min) Index(s string) (int, []int) { + var count int + + c := len(s) - self.Limit + 1 + if c <= 0 { + return -1, nil + } + + segments := acquireSegments(c) + for i, r := range s { + count++ + if count >= self.Limit { + segments = append(segments, i+utf8.RuneLen(r)) + } + } + + if len(segments) == 0 { + return -1, nil + } + + return 0, segments +} + +func (self Min)
Len() int { + return lenNo +} + +func (self Min) String() string { + return fmt.Sprintf("<min:%d>", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 000000000000..0d4ecd36b80b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("<nothing>") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 000000000000..a7347250e8d5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("<prefix:%s>", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 000000000000..8ee58fe1b3c7 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 000000000000..8208085a1996 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
@@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if prefixIdx == -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 000000000000..ce30245a40bc --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!"
+ } + return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 000000000000..4379042e42f3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 000000000000..9ea6f3094398 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 000000000000..ee6e3954c1f3 --- /dev/null +++ 
b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("<single:![%s]>", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 000000000000..85bea8c68ec4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("<suffix:%s>", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 000000000000..c5106f8196ca --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 000000000000..3875950bb8c7 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, 
segments +} + +func (self Super) String() string { + return fmt.Sprintf("<super>") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 000000000000..0a17616d3cb8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// raw represents raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("<text:`%s`>", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 000000000000..f58144e733e2 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.amrom.workers.dev") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.amrom.workers.dev")) + g.Match("*.github.amrom.workers.dev") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = 
glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is created for compile-once patterns. This means, that compilation could take time, but +strings matching is done faster, than in case when always parsing template. + +If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower. + +Run `go test -bench=.` from source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same things with `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` is aka super-asterisk, that do not sensitive for separators. 
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 000000000000..3220a694a9cb --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 000000000000..429b4094303e --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, 
NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return parserRange, tree, nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 000000000000..a1c8d1962a06 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 000000000000..2797c4e83a4f --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 000000000000..1d168b148299 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 000000000000..a72355641072 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return 
IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; i-- { + for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 000000000000..e8ee1920b17e --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/gosuri/uitable/.travis.yml b/vendor/github.com/gosuri/uitable/.travis.yml new file mode 100644 index 000000000000..26fc3b438bfb --- /dev/null +++ b/vendor/github.com/gosuri/uitable/.travis.yml @@ -0,0 +1,6 @@ +language: go +sudo: false +install: + - go get ./... 
+go: + - tip diff --git a/vendor/github.com/gosuri/uitable/LICENSE b/vendor/github.com/gosuri/uitable/LICENSE new file mode 100644 index 000000000000..e436d90439dd --- /dev/null +++ b/vendor/github.com/gosuri/uitable/LICENSE @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (c) 2015, Greg Osuri + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gosuri/uitable/Makefile b/vendor/github.com/gosuri/uitable/Makefile new file mode 100644 index 000000000000..60246a8a93c4 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/Makefile @@ -0,0 +1,4 @@ +test: + @go test ./... + +.PHONY: test diff --git a/vendor/github.com/gosuri/uitable/README.md b/vendor/github.com/gosuri/uitable/README.md new file mode 100644 index 000000000000..683b538d1329 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/README.md @@ -0,0 +1,67 @@ +# uitable [![GoDoc](https://godoc.org/github.com/gosuri/uitable?status.svg)](https://godoc.org/github.com/gosuri/uitable) [![Build Status](https://travis-ci.org/gosuri/uitable.svg?branch=master)](https://travis-ci.org/gosuri/uitable) + +uitable is a go library for representing data as tables for terminal applications. It provides primitives for sizing and wrapping columns to improve readability. + +## Example Usage + +Full source code for the example is available at [example/main.go](example/main.go) + +```go +table := uitable.New() +table.MaxColWidth = 50 + +table.AddRow("NAME", "BIRTHDAY", "BIO") +for _, hacker := range hackers { + table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio) +} +fmt.Println(table) +``` + +Will render the data as: + +```sh +NAME BIRTHDAY BIO +Ada Lovelace December 10, 1815 Ada was a British mathematician and writer, chi... +Alan Turing June 23, 1912 Alan was a British pioneering computer scientis... 
+``` + +For wrapping in two columns: + +```go +table = uitable.New() +table.MaxColWidth = 80 +table.Wrap = true // wrap columns + +for _, hacker := range hackers { + table.AddRow("Name:", hacker.Name) + table.AddRow("Birthday:", hacker.Birthday) + table.AddRow("Bio:", hacker.Bio) + table.AddRow("") // blank +} +fmt.Println(table) +``` + +Will render the data as: + +``` +Name: Ada Lovelace +Birthday: December 10, 1815 +Bio: Ada was a British mathematician and writer, chiefly known for her work on + Charles Babbage's early mechanical general-purpose computer, the Analytical + Engine + +Name: Alan Turing +Birthday: June 23, 1912 +Bio: Alan was a British pioneering computer scientist, mathematician, logician, + cryptanalyst and theoretical biologist +``` + +## Installation + +``` +$ go get -v github.com/gosuri/uitable +``` + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gosuri/uitable/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + diff --git a/vendor/github.com/gosuri/uitable/table.go b/vendor/github.com/gosuri/uitable/table.go new file mode 100644 index 000000000000..b764e51502c1 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/table.go @@ -0,0 +1,205 @@ +// Package uitable provides a decorator for formating data as a table +package uitable + +import ( + "fmt" + "strings" + "sync" + + "github.com/fatih/color" + "github.com/gosuri/uitable/util/strutil" + "github.com/gosuri/uitable/util/wordwrap" +) + +// Separator is the default column seperator +var Separator = "\t" + +// Table represents a decorator that renders the data in formatted in a table +type Table struct { + // Rows is the collection of rows in the table + Rows []*Row + + // MaxColWidth is the maximum allowed width for cells in the table + MaxColWidth uint + + // Wrap when set to true wraps the contents of the columns when the length exceeds the MaxColWidth + Wrap bool + + // Separator is the seperator for columns in the table. Default is "\t" + Separator string + + mtx *sync.RWMutex + rightAlign map[int]bool +} + +// New returns a new Table with default values +func New() *Table { + return &Table{ + Separator: Separator, + mtx: new(sync.RWMutex), + rightAlign: map[int]bool{}, + } +} + +// AddRow adds a new row to the table +func (t *Table) AddRow(data ...interface{}) *Table { + t.mtx.Lock() + defer t.mtx.Unlock() + r := NewRow(data...) 
+ t.Rows = append(t.Rows, r) + return t +} + +// Bytes returns the []byte value of table +func (t *Table) Bytes() []byte { + return []byte(t.String()) +} + +func (t *Table) RightAlign(col int) { + t.mtx.Lock() + t.rightAlign[col] = true + t.mtx.Unlock() +} + +// String returns the string value of table +func (t *Table) String() string { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if len(t.Rows) == 0 { + return "" + } + + // determine the width for each column (cell in a row) + var colwidths []uint + for _, row := range t.Rows { + for i, cell := range row.Cells { + // resize colwidth array + if i+1 > len(colwidths) { + colwidths = append(colwidths, 0) + } + cellwidth := cell.LineWidth() + if t.MaxColWidth != 0 && cellwidth > t.MaxColWidth { + cellwidth = t.MaxColWidth + } + + if cellwidth > colwidths[i] { + colwidths[i] = cellwidth + } + } + } + + var lines []string + for _, row := range t.Rows { + row.Separator = t.Separator + for i, cell := range row.Cells { + cell.Width = colwidths[i] + cell.Wrap = t.Wrap + cell.RightAlign = t.rightAlign[i] + } + lines = append(lines, row.String()) + } + return strutil.Join(lines, "\n") +} + +// Row represents a row in a table +type Row struct { + // Cells is the group of cell for the row + Cells []*Cell + + // Separator for tabular columns + Separator string +} + +// NewRow returns a new Row and adds the data to the row +func NewRow(data ...interface{}) *Row { + r := &Row{Cells: make([]*Cell, len(data))} + for i, d := range data { + r.Cells[i] = &Cell{Data: d} + } + return r +} + +// String returns the string representation of the row +func (r *Row) String() string { + // get the max number of lines for each cell + var lc int // line count + for _, cell := range r.Cells { + if clc := len(strings.Split(cell.String(), "\n")); clc > lc { + lc = clc + } + } + + // allocate a two-dimentional array of cells for each line and add size them + cells := make([][]*Cell, lc) + for x := 0; x < lc; x++ { + cells[x] = make([]*Cell, len(r.Cells)) + for y := 0; y < len(r.Cells); y++ { + cells[x][y] = &Cell{Width: r.Cells[y].Width} + } + } + + // insert each line in a cell as new cell in the cells array + for y, cell := range r.Cells { + lines := strings.Split(cell.String(), "\n") + for x, line := range lines { + cells[x][y].Data = line + } + } + + // format each line + lines := make([]string, lc) + for x := range lines { + line := make([]string, len(cells[x])) + for y := range cells[x] { + line[y] = cells[x][y].String() + } + lines[x] = strutil.Join(line, r.Separator) + } + return strings.Join(lines, "\n") +} + +// Cell represents a column in a row +type Cell struct { + // Width is the width of the cell + Width uint + + // Wrap when true wraps the contents of the cell when the lenght exceeds the width + Wrap bool + + // RightAlign when true aligns contents to the right + RightAlign bool + + // Data is the cell data + Data interface{} +} + +// LineWidth returns the max width of all the lines in a cell +func (c *Cell) LineWidth() uint { + width := 0 + for _, s := range strings.Split(c.String(), "\n") { + w := strutil.StringWidth(s) + if w > width { + width = w + } + } + return uint(width) +} + +// String returns the string formated representation of the cell +func (c *Cell) String() string { + if c.Data == nil { + return strutil.PadLeft(" ", int(c.Width), ' ') + } + col := color.New(color.FgBlack) + col.DisableColor() + s := fmt.Sprintf("%v", col.Sprint(c.Data)) + if c.Width > 0 { + if c.Wrap && uint(len(s)) > c.Width { + return wordwrap.WrapString(s, c.Width) + } else { 
+ return strutil.Resize(s, c.Width, c.RightAlign) + } + } + return s +} diff --git a/vendor/github.com/gosuri/uitable/util/strutil/strutil.go b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go new file mode 100644 index 000000000000..53f30a8fd3ba --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go @@ -0,0 +1,101 @@ +// Package strutil provides various utilities for manipulating strings +package strutil + +import ( + "bytes" + "regexp" + + "github.com/mattn/go-runewidth" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +// PadRight returns a new string of a specified length in which the end of the current string is padded with spaces or with a specified Unicode character. +func PadRight(str string, length int, pad byte) string { + slen := StringWidth(str) + if slen >= length { + return str + } + buf := bytes.NewBufferString(str) + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + return buf.String() +} + +// PadLeft returns a new string of a specified length in which the beginning of the current string is padded with spaces or with a specified Unicode character. +func PadLeft(str string, length int, pad byte) string { + slen := StringWidth(str) + if slen >= length { + return str + } + var buf bytes.Buffer + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + buf.WriteString(str) + return buf.String() +} + +// Resize resizes the string with the given length. It ellipses with '...' when the string's length exceeds +// the desired length or pads spaces to the right of the string when length is smaller than desired +func Resize(s string, length uint, rightAlign bool) string { + slen := StringWidth(s) + n := int(length) + if slen == n { + return s + } + // Pads only when length of the string smaller than len needed + if rightAlign { + s = PadLeft(s, n, ' ') + } else { + s = PadRight(s, n, ' ') + } + if slen > n { + rs := []rune(s) + var buf bytes.Buffer + w := 0 + for _, r := range rs { + buf.WriteRune(r) + rw := RuneWidth(r) + if w+rw >= n-3 { + break + } + w += rw + } + buf.WriteString("...") + s = buf.String() + } + return s +} + +// Join joins the list of the string with the delim provided. 
+// Returns an empty string for empty list +func Join(list []string, delim string) string { + if len(list) == 0 { + return "" + } + var buf bytes.Buffer + for i := 0; i < len(list)-1; i++ { + buf.WriteString(list[i] + delim) + } + buf.WriteString(list[len(list)-1]) + return buf.String() +} + +// Strip strips the string of all colors +func Strip(s string) string { + return re.ReplaceAllString(s, "") +} + +// StringWidth returns the actual width of the string without colors +func StringWidth(s string) int { + return runewidth.StringWidth(Strip(s)) +} + +// RuneWidth returns the actual width of the rune +func RuneWidth(s rune) int { + return runewidth.RuneWidth(s) +} diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md new file mode 100644 index 000000000000..229851590442 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/README.md b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md new file mode 100644 index 000000000000..60ae3117008d --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
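The pathological long-word case called out above is easy to demonstrate. A minimal sketch, assuming the upstream import path from the installation instructions (the vendored copy under `gosuri/uitable/util/wordwrap` behaves the same way):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/go-wordwrap"
)

func main() {
	// Breaks only happen at whitespace, so "verylongword" (12 chars)
	// overflows the 6-character limit instead of being split.
	fmt.Println(wordwrap.WrapString("a verylongword here", 6))
	// Output:
	// a
	// verylongword
	// here
}
```

This is why `uitable` wraps a cell only when `Wrap` is set and the content exceeds the column width, and otherwise truncates it with `strutil.Resize` (see `Cell.String` above).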
diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go new file mode 100644 index 000000000000..b2c63b87957e --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go @@ -0,0 +1,85 @@ +// Package wordwrap provides methods for wrapping the contents of a string +package wordwrap + +import ( + "bytes" + "unicode" + + "github.com/gosuri/uitable/util/strutil" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordWidth, spaceWidth int + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) > lim { + current = 0 + } else { + current += uint(spaceWidth) + spaceBuf.WriteTo(buf) + spaceWidth += strutil.StringWidth(buf.String()) + } + spaceBuf.Reset() + spaceWidth = 0 + } else { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + + spaceBuf.WriteRune(char) + spaceWidth += strutil.RuneWidth(char) + } else { + wordBuf.WriteRune(char) + wordWidth += strutil.RuneWidth(char) + + if current+uint(spaceWidth+wordWidth) > lim && uint(wordWidth) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceWidth = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 000000000000..c33dcc7c928c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000000..444df08f8e77 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. 
+ perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 000000000000..44e368e56922 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,178 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +// +// Deprecated: Use fmt.Errorf() +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. 
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 000000000000..82b4de97c7e3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 000000000000..b97cd6ed02b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000000..71dd308ed811 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,150 @@ +# go-multierror + +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) + +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
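+
+For example, in a modules-based project you can pin that older release explicitly (a suggested command, not part of this library; adjust the version to your needs):
+
+```
+go get github.com/hashicorp/go-multierror@v1.0.0
+```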
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. 
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 000000000000..3e2589bfde0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,43 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+// Any nil errors within errs will be ignored. If err is nil, a new
+// *Error will be returned.
+func Append(err error, errs ...error) *Error {
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Go through each error and flatten
+		for _, e := range errs {
+			switch e := e.(type) {
+			case *Error:
+				if e != nil {
+					err.Errors = append(err.Errors, e.Errors...)
+				}
+			default:
+				if e != nil {
+					err.Errors = append(err.Errors, e)
+				}
+			}
+		}
+
+		return err
+	default:
+		newErrs := make([]error, 0, len(errs)+1)
+		if err != nil {
+			newErrs = append(newErrs, err)
+		}
+		newErrs = append(newErrs, errs...)
+
+		return Append(&Error{}, newErrs...)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 000000000000..aab8e9abec9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+	// If it isn't an *Error, just return the error as-is
+	if _, ok := err.(*Error); !ok {
+		return err
+	}
+
+	// Otherwise, make the result and flatten away!
+	flatErr := new(Error)
+	flatten(err, flatErr)
+	return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+	switch err := err.(type) {
+	case *Error:
+		for _, e := range err.Errors {
+			flatten(e, flatErr)
+		}
+	default:
+		flatErr.Errors = append(flatErr.Errors, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 000000000000..47f13c49a673
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\t%s\n\n",
+		len(es), strings.Join(points, "\n\t"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go
new file mode 100644
index 000000000000..9c29efb7f87e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/group.go
@@ -0,0 +1,38 @@
+package multierror
+
+import "sync"
+
+// Group is a collection of goroutines which return errors that need to be
+// coalesced.
+type Group struct {
+	mutex sync.Mutex
+	err   *Error
+	wg    sync.WaitGroup
+}
+
+// Go calls the given function in a new goroutine.
+//
+// If the function returns an error, it is added to the group multierror which
+// is returned by Wait.
+func (g *Group) Go(f func() error) {
+	g.wg.Add(1)
+
+	go func() {
+		defer g.wg.Done()
+
+		if err := f(); err != nil {
+			g.mutex.Lock()
+			g.err = Append(g.err, err)
+			g.mutex.Unlock()
+		}
+	}()
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the multierror.
+func (g *Group) Wait() *Error {
+	g.wg.Wait()
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	return g.err
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 000000000000..f54574326461
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,121 @@
+package multierror
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors in cases where multiple errors occur and return them as a single "error".
+type Error struct {
+	Errors      []error
+	ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+	fn := e.ErrorFormat
+	if fn == nil {
+		fn = ListFormatFunc
+	}
+
+	return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+	if e == nil {
+		return nil
+	}
+	if len(e.Errors) == 0 {
+		return nil
+	}
+
+	return e
+}
+
+func (e *Error) GoString() string {
+	return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping. It is
+// an implementation of the errwrap.Wrapper interface so that multierror.Error
+// can be used with that library.
+//
+// This method is not safe to be called concurrently. Unlike accessing the
+// Errors field directly, this function also checks if the multierror is nil to
+// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+	return e.Errors
+}
+
+// Unwrap returns an error from Error (or nil if there are no errors).
+// The error returned will further support Unwrap to get the next error,
+// etc. The order will match the order of Errors in the multierror.Error
+// at the time of calling.
+//
+// The resulting error supports errors.As/Is/Unwrap so you can continue
+// to use the stdlib errors package to introspect further.
+//
+// This will perform a shallow copy of the errors slice. Any errors appended
+// to this error after calling Unwrap will not be available until a new
+// Unwrap is called on the multierror.Error.
+func (e *Error) Unwrap() error {
+	// If we have no errors then we do nothing
+	if e == nil || len(e.Errors) == 0 {
+		return nil
+	}
+
+	// If we have exactly one error, we can just return that directly.
+	if len(e.Errors) == 1 {
+		return e.Errors[0]
+	}
+
+	// Shallow copy the slice
+	errs := make([]error, len(e.Errors))
+	copy(errs, e.Errors)
+	return chain(errs)
+}
+
+// chain implements the interfaces necessary for errors.Is/As/Unwrap to
+// work in a deterministic way with multierror. A chain tracks a list of
+// errors while accounting for the current represented error. This lets
+// Is/As be meaningful.
+//
+// Unwrap returns the next error. In the cleanest form, Unwrap would return
+// the wrapped error here, but we can't do that if we want to properly
+// get access to all the errors. Instead, users are recommended to use
+// Is/As to get the correct error type out.
+//
+// Precondition: []error is non-empty (len > 0)
+type chain []error
+
+// Error implements the error interface
+func (e chain) Error() string {
+	return e[0].Error()
+}
+
+// Unwrap implements errors.Unwrap by returning the next error in the
+// chain or nil if there are no more errors.
+func (e chain) Unwrap() error {
+	if len(e) == 1 {
+		return nil
+	}
+
+	return e[1:]
+}
+
+// As implements errors.As by attempting to map to the current value.
+func (e chain) As(target interface{}) bool {
+	return errors.As(e[0], target)
+}
+
+// Is implements errors.Is by comparing the current value directly.
+func (e chain) Is(target error) bool {
+	return errors.Is(e[0], target)
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 000000000000..5c477abe44f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful when appending multiple multierrors
+// together in order to give better scoping.
+func Prefix(err error, prefix string) error {
+	if err == nil {
+		return nil
+	}
+
+	format := fmt.Sprintf("%s {{err}}", prefix)
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Wrap each of the errors
+		for i, e := range err.Errors {
+			err.Errors[i] = errwrap.Wrapf(format, e)
+		}
+
+		return err
+	default:
+		return errwrap.Wrapf(format, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
new file mode 100644
index 000000000000..fecb14e81c54
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/sort.go
@@ -0,0 +1,16 @@
+package multierror
+
+// Len implements sort.Interface function for length
+func (err Error) Len() int {
+	return len(err.Errors)
+}
+
+// Swap implements sort.Interface function for swapping elements
+func (err Error) Swap(i, j int) {
+	err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
+}
+
+// Less implements sort.Interface function for determining order
+func (err Error) Less(i, j int) bool {
+	return err.Errors[i].Error() < err.Errors[j].Error()
+}
diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore
new file mode 100644
index 000000000000..daf913b1b347
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
new file mode 100644
index 000000000000..d7b4b8d584b7
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing #
+
+Thanks in advance for your contribution. Whatever you contribute to this project, whether a pull request, a bug report, or a feature discussion, it's always highly appreciated.
+
+## New API or feature ##
+
+I want to say more about how to add new functions to this package.
+
+Package `xstrings` is a collection of useful string functions which should be implemented in Go. It's a bit subjective to say which functions should be included and which should not, so I set up the following rules to make the decision as clear and objective as possible.
+
+* Rule 1: Only string algorithms, which take a string as input, can be included.
+* Rule 2: If a function has been implemented in package `strings`, it must not be included.
+* Rule 3: If a function is not language neutral, it must not be included.
+* Rule 4: If a function is part of the standard library in other languages, it can be included.
+* Rule 5: If a function is quite useful in some famous framework or library, it can be included.
+
+New functions must be discussed in project issues before any code is submitted. If a pull request with new functions is sent without a referenced issue, it will be rejected.
+
+## Pull request ##
+
+Pull requests are always welcome. Just make sure you have run `go fmt` and that all test cases pass before submitting (see the example below).
+
+If the pull request adds a new API or feature, don't forget to update README.md and add the new API to the function list.
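+
+A typical pre-submit check (a suggested invocation, not a project-mandated script) looks like this:
+
+```
+go fmt ./...
+go test ./...
+```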
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE
new file mode 100644
index 000000000000..270177259365
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Huan Du
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
new file mode 100644
index 000000000000..e809c79abc52
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -0,0 +1,118 @@
+# xstrings
+
+[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions)
+[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings)
+[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings)
+[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master)
+
+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions which are widely used in other languages but absent from the Go package [strings](http://golang.org/pkg/strings).
+
+All functions are well tested and carefully tuned for performance.
+
+## Propose a new function
+
+Please review the [contributing guideline](CONTRIBUTING.md) and [create a new issue](https://github.com/huandu/xstrings/issues) to state why it should be included.
+
+## Install
+
+Use `go get` to install this library.
+
+    go get github.com/huandu/xstrings
+
+## API document
+
+See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for the full documentation.
+
+## Function list
+
+Go functions have a unique naming style. Someone who has experience in other languages but is new to Go may have difficulty finding the right string function to use.
+
+Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hopefully this list is helpful for fresh gophers.
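+
+For a quick taste before the full list, here is a small, self-contained example using a few of the case-conversion functions; the expected output shown in the comments comes from this package's own documentation.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/huandu/xstrings"
+)
+
+func main() {
+	fmt.Println(xstrings.ToSnakeCase("HTTPServer")) // http_server
+	fmt.Println(xstrings.ToCamelCase("some_words")) // someWords
+	fmt.Println(xstrings.ToKebabCase("GO_PATH"))    // go-path
+}
+```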
+ +### Package `xstrings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | # | +| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | 
[#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToPascalCase](https://godoc.org/github.com/huandu/xstrings#ToPascalCase) | - | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | +| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| 
[IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 000000000000..f427cc84e2ee --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,21 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. 
+func allocBuffer(orig, cur string) *stringBuilder {
+	output := &stringBuilder{}
+	maxSize := len(orig) * 4
+
+	// Avoid reserving too much memory at once.
+	if maxSize > bufferMaxInitGrowSize {
+		maxSize = bufferMaxInitGrowSize
+	}
+
+	output.Grow(maxSize)
+	output.WriteString(orig[:len(orig)-len(cur)])
+	return output
+}
diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go
new file mode 100644
index 000000000000..5d8cfee470b0
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/convert.go
@@ -0,0 +1,633 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"math/rand"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ToCamelCase converts words separated by space, underscore and hyphen to camel case.
+//
+// Some samples.
+//
+//	"some_words" => "someWords"
+//	"http_server" => "httpServer"
+//	"no_https" => "noHttps"
+//	"_complex__case_" => "_complex_Case_"
+//	"some words" => "someWords"
+//	"GOLANG_IS_GREAT" => "golangIsGreat"
+func ToCamelCase(str string) string {
+	return toCamelCase(str, false)
+}
+
+// ToPascalCase converts words separated by space, underscore and hyphen to pascal case.
+//
+// Some samples.
+//
+//	"some_words" => "SomeWords"
+//	"http_server" => "HttpServer"
+//	"no_https" => "NoHttps"
+//	"_complex__case_" => "_Complex_Case_"
+//	"some words" => "SomeWords"
+//	"GOLANG_IS_GREAT" => "GolangIsGreat"
+func ToPascalCase(str string) string {
+	return toCamelCase(str, true)
+}
+
+func toCamelCase(str string, isBig bool) string {
+	if len(str) == 0 {
+		return ""
+	}
+
+	buf := &stringBuilder{}
+	var isFirstRuneUpper bool
+	var r0, r1 rune
+	var size int
+
+	// A leading connector will appear in the output.
+	for len(str) > 0 {
+		r0, size = utf8.DecodeRuneInString(str)
+		str = str[size:]
+
+		if !isConnector(r0) {
+			isFirstRuneUpper = unicode.IsUpper(r0)
+
+			if isBig {
+				r0 = unicode.ToUpper(r0)
+			} else {
+				r0 = unicode.ToLower(r0)
+			}
+
+			break
+		}
+
+		buf.WriteRune(r0)
+	}
+
+	if len(str) == 0 {
+		// A special case for a string that contains only 1 rune.
+		if size != 0 {
+			buf.WriteRune(r0)
+		}
+
+		return buf.String()
+	}
+
+	for len(str) > 0 {
+		r1 = r0
+		r0, size = utf8.DecodeRuneInString(str)
+		str = str[size:]
+
+		if isConnector(r0) && isConnector(r1) {
+			buf.WriteRune(r1)
+			continue
+		}
+
+		if isConnector(r1) {
+			isFirstRuneUpper = unicode.IsUpper(r0)
+			r0 = unicode.ToUpper(r0)
+		} else {
+			if isFirstRuneUpper {
+				if unicode.IsUpper(r0) {
+					r0 = unicode.ToLower(r0)
+				} else {
+					isFirstRuneUpper = false
+				}
+			}
+
+			buf.WriteRune(r1)
+		}
+	}
+
+	if isFirstRuneUpper && !isBig {
+		r0 = unicode.ToLower(r0)
+	}
+
+	buf.WriteRune(r0)
+	return buf.String()
+}
+
+// ToSnakeCase converts all upper case characters in a string to
+// snake case format.
+//
+// Some samples.
+//
+//	"FirstName" => "first_name"
+//	"HTTPServer" => "http_server"
+//	"NoHTTPS" => "no_https"
+//	"GO_PATH" => "go_path"
+//	"GO PATH" => "go_path" // space is converted to underscore.
+//	"GO-PATH" => "go_path" // hyphen is converted to underscore.
+//	"http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet.
+//	"HTTP20xOK" => "http_20x_ok"
+//	"Duration2m3s" => "duration_2m3s"
+//	"Bld4Floor3rd" => "bld4_floor_3rd"
+func ToSnakeCase(str string) string {
+	return camelCaseToLowerCase(str, '_')
+}
+
+// ToKebabCase converts all upper case characters in a string to
+// kebab case format.
+//
+// Some samples.
+//
+//	"FirstName" => "first-name"
+//	"HTTPServer" => "http-server"
+//	"NoHTTPS" => "no-https"
+//	"GO_PATH" => "go-path"
+//	"GO PATH" => "go-path" // space is converted to '-'.
+//	"GO-PATH" => "go-path" // hyphen is converted to '-'.
+//	"http2xx" => "http-2xx" // insert a hyphen before a number and after an alphabet.
+//	"HTTP20xOK" => "http-20x-ok"
+//	"Duration2m3s" => "duration-2m3s"
+//	"Bld4Floor3rd" => "bld4-floor-3rd"
+func ToKebabCase(str string) string {
+	return camelCaseToLowerCase(str, '-')
+}
+
+func camelCaseToLowerCase(str string, connector rune) string {
+	if len(str) == 0 {
+		return ""
+	}
+
+	buf := &stringBuilder{}
+	wt, word, remaining := nextWord(str)
+
+	for len(remaining) > 0 {
+		if wt != connectorWord {
+			toLower(buf, wt, word, connector)
+		}
+
+		prev := wt
+		last := word
+		wt, word, remaining = nextWord(remaining)
+
+		switch prev {
+		case numberWord:
+			for wt == alphabetWord || wt == numberWord {
+				toLower(buf, wt, word, connector)
+				wt, word, remaining = nextWord(remaining)
+			}
+
+			if wt != invalidWord && wt != punctWord && wt != connectorWord {
+				buf.WriteRune(connector)
+			}
+
+		case connectorWord:
+			toLower(buf, prev, last, connector)
+
+		case punctWord:
+			// nothing.
+
+		default:
+			if wt != numberWord {
+				if wt != connectorWord && wt != punctWord {
+					buf.WriteRune(connector)
+				}
+
+				break
+			}
+
+			if len(remaining) == 0 {
+				break
+			}
+
+			last := word
+			wt, word, remaining = nextWord(remaining)
+
+			// consider a number as part of the previous word.
+			// e.g. "Bld4Floor" => "bld4_floor"
+			if wt != alphabetWord {
+				toLower(buf, numberWord, last, connector)
+
+				if wt != connectorWord && wt != punctWord {
+					buf.WriteRune(connector)
+				}
+
+				break
+			}
+
+			// if there are some lower case letters following a number,
+			// add a connector before the number.
+			// e.g. "HTTP2xx" => "http_2xx"
+			buf.WriteRune(connector)
+			toLower(buf, numberWord, last, connector)
+
+			for wt == alphabetWord || wt == numberWord {
+				toLower(buf, wt, word, connector)
+				wt, word, remaining = nextWord(remaining)
+			}
+
+			if wt != invalidWord && wt != connectorWord && wt != punctWord {
+				buf.WriteRune(connector)
+			}
+		}
+	}
+
+	toLower(buf, wt, word, connector)
+	return buf.String()
+}
+
+func isConnector(r rune) bool {
+	return r == '-' || r == '_' || unicode.IsSpace(r)
+}
+
+type wordType int
+
+const (
+	invalidWord wordType = iota
+	numberWord
+	upperCaseWord
+	alphabetWord
+	connectorWord
+	punctWord
+	otherWord
+)
+
+func nextWord(str string) (wt wordType, word, remaining string) {
+	if len(str) == 0 {
+		return
+	}
+
+	var offset int
+	remaining = str
+	r, size := nextValidRune(remaining, utf8.RuneError)
+	offset += size
+
+	if r == utf8.RuneError {
+		wt = invalidWord
+		word = str[:offset]
+		remaining = str[offset:]
+		return
+	}
+
+	switch {
+	case isConnector(r):
+		wt = connectorWord
+		remaining = remaining[size:]
+
+		for len(remaining) > 0 {
+			r, size = nextValidRune(remaining, r)
+
+			if !isConnector(r) {
+				break
+			}
+
+			offset += size
+			remaining = remaining[size:]
+		}
+
+	case unicode.IsPunct(r):
+		wt = punctWord
+		remaining = remaining[size:]
+
+		for len(remaining) > 0 {
+			r, size = nextValidRune(remaining, r)
+
+			if !unicode.IsPunct(r) {
+				break
+			}
+
+			offset += size
+			remaining = remaining[size:]
+		}
+
+	case unicode.IsUpper(r):
+		wt = upperCaseWord
+		remaining = remaining[size:]
+
+		if len(remaining) == 0 {
+			break
+		}
+
+		r, size = nextValidRune(remaining, r)
+
+		switch {
+		case unicode.IsUpper(r):
+			prevSize := size
+			offset += size
+			remaining = remaining[size:]
+
+			for len(remaining) > 0 {
+				r, size = nextValidRune(remaining, r)
+
+				if !unicode.IsUpper(r) {
+					break
+				}
+
+				prevSize = size
+				offset += size
+				remaining = remaining[size:]
+			}
+
+			// it's a bit complex when dealing with a case like "HTTPStatus".
+			// it's expected to be split into "HTTP" and "Status".
+			// Therefore "S" should be in remaining instead of word.
+ if len(remaining) > 0 && isAlphabet(r) { + offset -= prevSize + remaining = str[offset:] + } + + case isAlphabet(r): + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + case isAlphabet(r): + wt = alphabetWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsNumber(r): + wt = numberWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsNumber(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + default: + wt = otherWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + word = str[:offset] + return +} + +func nextValidRune(str string, prev rune) (r rune, size int) { + var sz int + + for len(str) > 0 { + r, sz = utf8.DecodeRuneInString(str) + size += sz + + if r != utf8.RuneError { + return + } + + str = str[sz:] + } + + r = prev + return +} + +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { + buf.Grow(buf.Len() + len(str)) + + if wt != upperCaseWord && wt != connectorWord { + buf.WriteString(str) + return + } + + for len(str) > 0 { + r, size := utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r) { + buf.WriteRune(connector) + } else if unicode.IsUpper(r) { + buf.WriteRune(unicode.ToLower(r)) + } else { + buf.WriteRune(r) + } + } +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &stringBuilder{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. +func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. 
+func ShuffleSource(str string, src rand.Source) string {
+	if str == "" {
+		return str
+	}
+
+	runes := []rune(str)
+	index := 0
+	r := rand.New(src)
+
+	for i := len(runes) - 1; i > 0; i-- {
+		index = r.Intn(i + 1)
+
+		if i != index {
+			runes[i], runes[index] = runes[index], runes[i]
+		}
+	}
+
+	return string(runes)
+}
+
+// Successor returns the successor to string.
+//
+// If an alphanumeric rune is found in the string, increase that rune by 1.
+// If the increment generates a "carry", the rune to the left of it is incremented.
+// This process repeats until there is no carry, adding an additional rune if necessary.
+//
+// If there is no alphanumeric rune, the rightmost rune will be increased by 1
+// regardless of whether the result is a valid rune or not.
+//
+// Only the following characters are alphanumeric.
+// - a - z
+// - A - Z
+// - 0 - 9
+//
+// Samples (borrowed from ruby's String#succ document):
+//
+//	"abcd" => "abce"
+//	"THX1138" => "THX1139"
+//	"<<koala>>" => "<<koalb>>"
+//	"1999zzz" => "2000aaa"
+//	"ZZZ9999" => "AAAA0000"
+//	"***" => "**+"
+func Successor(str string) string {
+	if str == "" {
+		return str
+	}
+
+	var r rune
+	var i int
+	carry := ' '
+	runes := []rune(str)
+	l := len(runes)
+	lastAlphanumeric := l
+
+	for i = l - 1; i >= 0; i-- {
+		r = runes[i]
+
+		if ('a' <= r && r <= 'y') ||
+			('A' <= r && r <= 'Y') ||
+			('0' <= r && r <= '8') {
+			runes[i]++
+			carry = ' '
+			lastAlphanumeric = i
+			break
+		}
+
+		switch r {
+		case 'z':
+			runes[i] = 'a'
+			carry = 'a'
+			lastAlphanumeric = i
+
+		case 'Z':
+			runes[i] = 'A'
+			carry = 'A'
+			lastAlphanumeric = i
+
+		case '9':
+			runes[i] = '0'
+			carry = '0'
+			lastAlphanumeric = i
+		}
+	}
+
+	// Needs to add one character for carry.
+	if i < 0 && carry != ' ' {
+		buf := &stringBuilder{}
+		buf.Grow(l + 4) // Reserve enough space for write.
+
+		if lastAlphanumeric != 0 {
+			buf.WriteString(str[:lastAlphanumeric])
+		}
+
+		buf.WriteRune(carry)
+
+		for _, r = range runes[lastAlphanumeric:] {
+			buf.WriteRune(r)
+		}
+
+		return buf.String()
+	}
+
+	// No alphanumeric character. Simply increase last rune's value.
+	if lastAlphanumeric == l {
+		runes[l-1]++
+	}
+
+	return string(runes)
+}
diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go
new file mode 100644
index 000000000000..f96e38703a3a
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/count.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+// Len returns str's utf8 rune length.
+func Len(str string) int {
+	return utf8.RuneCountInString(str)
+}
+
+// WordCount returns the number of words in a string.
+//
+// A word is defined as a locale dependent string containing alphabetic characters,
+// which may also contain but not start with `'` and `-` characters.
+func WordCount(str string) int {
+	var r rune
+	var size, n int
+
+	inWord := false
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+
+		switch {
+		case isAlphabet(r):
+			if !inWord {
+				inWord = true
+				n++
+			}
+
+		case inWord && (r == '\'' || r == '-'):
+			// Still in word.
+
+		default:
+			inWord = false
+		}
+
+		str = str[size:]
+	}
+
+	return n
+}
+
+const minCJKCharacter = '\u3400'
+
+// isAlphabet checks whether r is a letter but not a CJK character.
+func isAlphabet(r rune) bool {
+	if !unicode.IsLetter(r) {
+		return false
+	}
+
+	switch {
+	// Quick check for non-CJK character.
+ case r < minCJKCharacter: + return true + + // Common CJK characters. + case r >= '\u4E00' && r <= '\u9FCC': + return false + + // Rare CJK characters. + case r >= '\u3400' && r <= '\u4D85': + return false + + // Rare and historic CJK characters. + case r >= '\U00020000' && r <= '\U0002B81D': + return false + } + + return true +} + +// Width returns string width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func Width(str string) int { + var r rune + var size, n int + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + n += RuneWidth(r) + str = str[size:] + } + + return n +} + +// RuneWidth returns character width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func RuneWidth(r rune) int { + switch { + case r == utf8.RuneError || r < '\x20': + return 0 + + case '\x20' <= r && r < '\u2000': + return 1 + + case '\u2000' <= r && r < '\uFF61': + return 2 + + case '\uFF61' <= r && r < '\uFFA0': + return 1 + + case '\uFFA0' <= r: + return 2 + } + + return 0 +} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go new file mode 100644 index 000000000000..1a6ef069f613 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/doc.go @@ -0,0 +1,8 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +// Package xstrings provides string algorithms which are useful but not included in the `strings` package. +// See the project home page for details. https://github.com/huandu/xstrings +// +// Package xstrings assumes all strings are encoded in utf8. +package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go new file mode 100644 index 000000000000..b32219bbd58d --- /dev/null +++ b/vendor/github.com/huandu/xstrings/format.go @@ -0,0 +1,173 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode/utf8" +) + +// ExpandTabs can expand tab ('\t') runes in str to one or more spaces depending on +// the current column and tabSize. +// The column number is reset to zero after each newline ('\n') occurring in str. +// +// ExpandTabs uses RuneWidth to decide each rune's width. +// For example, CJK characters will be treated as two characters. +// +// If tabSize <= 0, ExpandTabs panics with an error.
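+// +// Note that the current column advances by RuneWidth(r) for each rune written, +// so a tab following a double-width (e.g. CJK) rune expands to fewer spaces +// than one following a single-width rune.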
+// +// Samples: +// +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *stringBuilder + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteRune(' ') + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
+// +// Samples: +// +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *stringBuilder, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 000000000000..ab42fe0fec63 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,220 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is three strings: head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by the last instance of sep into three parts. +// The return value is three strings: head, match and tail.
+// +// If str contains sep, for example "hello" and "l", LastPartition returns +// +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *stringBuilder + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &stringBuilder{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 000000000000..06812fea07db --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,8 @@ +//go:build go1.10 +// +build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 000000000000..ccaa5aedd33c --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,10 @@ +//go:build !go1.10 +// +build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 000000000000..1fac6a00be35 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,552 @@ +// Copyright 2015 Huan Du. All rights reserved. 
+// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. + if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. 
+ if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs to be considered + // as a single rune. + if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. + if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If any single rune conflicts with this rune range, clear the single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore '-' at the beginning of the string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range whose start and end are the same. + // Consider it as a normal character. + if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples.
+func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *stringBuilder + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune returns the translated rune and true if r matches the from pattern. +// If r doesn't match the pattern, the original r is returned and translated is false. +func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has at least one pattern. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. A pattern is defined as follows. +// +// Special characters: +// +// 1. '-' means a range of runes, e.g. +// "a-z" means all characters from 'a' to 'z' inclusive; +// "z-a" means all characters from 'z' to 'a' inclusive. +// 2. '^' as the first character means the set of all runes except those listed, e.g. +// "^a-z" means all characters except 'a' to 'z' inclusive. +// 3. '\' escapes special characters. +// +// A normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. +// +// Translate will try to find a 1:1 mapping from `from` to `to`. +// If `to` is smaller than `from`, the last rune in `to` will be used to map "out of range" characters in `from`. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern.
+// Pattern is defined in Translate function. +// +// Samples: +// +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. +// +// Samples: +// +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *stringBuilder + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. + if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 000000000000..b2be23c87f32 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE new file mode 100644 index 000000000000..0d31edfa737e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/LICENSE @@ -0,0 +1,23 @@ + Copyright (c) 2013, Jason Moiron + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/jmoiron/sqlx/Makefile b/vendor/github.com/jmoiron/sqlx/Makefile new file mode 100644 index 000000000000..448b9ddd9cfd --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/Makefile @@ -0,0 +1,30 @@ +.ONESHELL: +SHELL = /bin/sh +.SHELLFLAGS = -ec + +BASE_PACKAGE := github.com/jmoiron/sqlx + +tooling: + go install honnef.co/go/tools/cmd/staticcheck@v0.4.7 + go install golang.org/x/vuln/cmd/govulncheck@v1.0.4 + go install golang.org/x/tools/cmd/goimports@v0.20.0 + +has-changes: + git diff --exit-code --quiet HEAD -- + +lint: + go vet ./... + staticcheck -checks=all ./... + +fmt: + go list -f '{{.Dir}}' ./... | xargs -I {} goimports -local $(BASE_PACKAGE) -w {} + +vuln-check: + govulncheck ./... + +test-race: + go test -v -race -count=1 ./... + +update-dependencies: + go get -u -t -v ./... + go mod tidy diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md new file mode 100644 index 000000000000..5bfd231a11cf --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -0,0 +1,213 @@ +# sqlx + +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/jmoiron/sqlx/tree/master.svg?style=shield)](https://dl.circleci.com/status-badge/redirect/gh/jmoiron/sqlx/tree/master) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) + +sqlx is a library which provides a set of extensions on go's standard +`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, +et al. all leave the underlying interfaces untouched, so that their interfaces +are a superset on the standard ones. This makes it relatively painless to +integrate existing codebases using database/sql with sqlx. + +Major additional concepts are: + +* Marshal rows into structs (with embedded struct support), maps, and slices +* Named parameter support including prepared statements +* `Get` and `Select` to go quickly from query to struct/slice + +In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx), +there is also some [user documentation](http://jmoiron.github.io/sqlx/) that +explains how to use `database/sql` along with sqlx. + +## Recent Changes + +1.3.0: + +* `sqlx.DB.Connx(context.Context) *sqlx.Conn` +* `sqlx.BindDriver(driverName, bindType)` +* support for `[]map[string]interface{}` to do "batch" insertions +* allocation & perf improvements for `sqlx.In` + +DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with +sqlx's wrapping of other types. 
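+ +As an illustrative sketch (this example is not part of the upstream README), the +returned `sqlx.Conn` composes with the context-aware helpers like the other +wrapper types: + +```go +conn, err := db.Connx(ctx) +if err != nil { + log.Fatalln(err) +} +defer conn.Close() + +var name string +// Get/Select work on a Conn as they do on a DB or Tx. +err = conn.GetContext(ctx, &name, "SELECT first_name FROM person LIMIT 1") +```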
+ +`BindDriver` allows users to control the bindvars that sqlx will use for drivers, +and add new drivers at runtime. This results in a very slight performance hit +when resolving the driver into a bind type (~40ns per call), but it allows users +to specify what bindtype their driver uses even when sqlx has not been updated +to know about it by default. + +### Backwards Compatibility + +Compatibility with the most recent two versions of Go is a requirement for any +new changes. Compatibility beyond that is not guaranteed. + +Versioning is done with Go modules. Breaking changes (eg. removing deprecated API) +will get major version number bumps. + +## install + + go get github.com/jmoiron/sqlx + +## issues + +Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of +`Columns()` does not fully qualify column names in queries like: + +```sql +SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; +``` + +making a struct or map destination ambiguous. Use `AS` in your queries +to give columns distinct names, `rows.Scan` to scan them manually, or +`SliceScan` to get a slice of results. + +## usage + +Below is an example which shows some common use cases for sqlx. Check +[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more +usage. + + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. 
person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. 
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) + + + // batch insert + + // batch insert with structs + personStructs := []Person{ + {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, + {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, + {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personStructs) + + // batch insert with maps + personMaps := []map[string]interface{}{ + {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, + {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, + {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personMaps) +} +``` diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 000000000000..ec0da4e72ee6 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,265 @@ +package sqlx + +import ( + "bytes" + "database/sql/driver" + "errors" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED + AT +) + +var defaultBinds = map[int][]string{ + DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"}, + QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"}, + NAMED: []string{"oci8", "ora", "goracle", "godror"}, + AT: []string{"sqlserver"}, +} + +var binds sync.Map + +func init() { + for bind, drivers := range defaultBinds { + for _, driver := range drivers { + BindDriver(driver, bind) + } + } + +} + +// BindType returns the bindtype for a given database given a drivername. +func BindType(driverName string) int { + itype, ok := binds.Load(driverName) + if !ok { + return UNKNOWN + } + return itype.(int) +} + +// BindDriver sets the BindType for driverName to bindType. +func BindDriver(driverName string, bindType int) { + binds.Store(driverName, bindType) +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. +func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + case AT: + rqb = append(rqb, '@', 'p') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. 
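+// +// Both implementations produce the same output for the DOLLAR bind type, e.g. +// +// Rebind(DOLLAR, "SELECT * FROM t WHERE a = ? AND b = ?") +// // => "SELECT * FROM t WHERE a = $1 AND b = $2"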
+func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +func asSliceForIn(i interface{}) (v reflect.Value, ok bool) { + if i == nil { + return reflect.Value{}, false + } + + v = reflect.ValueOf(i) + t := reflectx.Deref(v.Type()) + + // Only expand slices + if t.Kind() != reflect.Slice { + return reflect.Value{}, false + } + + // []byte is a driver.Value type so it should not be expanded + if t == reflect.TypeOf([]byte{}) { + return reflect.Value{}, false + + } + + return v, true +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. +func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + var stackMeta [32]argMeta + + var meta []argMeta + if len(args) <= len(stackMeta) { + meta = stackMeta[:len(args)] + } else { + meta = make([]argMeta, len(args)) + } + + for i, arg := range args { + if a, ok := arg.(driver.Valuer); ok { + var err error + arg, err = a.Value() + if err != nil { + return "", nil, err + } + } + + if v, ok := asSliceForIn(arg); ok { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. + if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + + var buf strings.Builder + buf.Grow(len(query) + len(", ?")*flatArgsCount) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. + return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf.WriteString(query[:offset+i+1]) + + for si := 1; si < argMeta.length; si++ { + buf.WriteString(", ?") + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. 
this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf.WriteString(query) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return buf.String(), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 000000000000..b80104175d11 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,11 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. +package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go new file mode 100644 index 000000000000..6ac447771317 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -0,0 +1,458 @@ +package sqlx + +// Named Query Support +// +// * BindMap - bind query bindvars to map/struct args +// * NamedExec, NamedQuery - named query w/ struct or map +// * NamedStmt - a pre-compiled named query which is a prepared statement +// +// Internal Interfaces: +// +// * compileNamedQuery - rebind a named query, returning a query and list of names +// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist +// +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "unicode" + + "github.com/jmoiron/sqlx/reflectx" +) + +// NamedStmt is a prepared statement that executes named queries. Prepare it +// how you would execute a NamedQuery, but pass in a struct or map when executing. +type NamedStmt struct { + Params []string + QueryString string + Stmt *Stmt +} + +// Close closes the named statement. +func (n *NamedStmt) Close() error { + return n.Stmt.Close() +} + +// Exec executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.Exec(args...) +} + +// Query executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.Query(args...) +} + +// QueryRow executes a named statement against the database. 
Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRow(arg interface{}) *Row { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return &Row{err: err} + } + return n.Stmt.QueryRowx(args...) +} + +// MustExec execs a NamedStmt, panicking on error. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) MustExec(arg interface{}) sql.Result { + res, err := n.Exec(arg) + if err != nil { + panic(err) + } + return res +} + +// Queryx using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { + r, err := n.Query(arg) + if err != nil { + return nil, err + } + return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err +} + +// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is +// an alias for QueryRow. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowx(arg interface{}) *Row { + return n.QueryRow(arg) +} + +// Select using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { + rows, err := n.Queryx(arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { + r := n.QueryRowx(arg) + return r.scanAny(dest, false) +} + +// Unsafe creates an unsafe version of the NamedStmt +func (n *NamedStmt) Unsafe() *NamedStmt { + r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} + r.Stmt.unsafe = true + return r +} + +// A union interface of preparer and binder, required to be able to prepare +// named statements (as the bindtype must be determined). +type namedPreparer interface { + Preparer + binder +} + +func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := Preparex(p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// convertMapStringInterface attempts to convert v to map[string]interface{}. +// Unlike v.(map[string]interface{}), this function works on named types that +// are convertible to map[string]interface{} as well. +func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) { + var m map[string]interface{} + mtype := reflect.TypeOf(m) + t := reflect.TypeOf(v) + if !t.ConvertibleTo(mtype) { + return nil, false + } + return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true + +} + +func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + if maparg, ok := convertMapStringInterface(arg); ok { + return bindMapArgs(names, maparg) + } + return bindArgs(names, arg, m) +} + +// private interface to generate a list of interfaces from a given struct +// type, given a list of names to pull out of the struct. Used by public +// BindStruct interface.
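+// +// For example (illustrative), names []string{"first_name"} resolved against a +// struct whose `db:"first_name"` field holds "Jason" yields +// []interface{}{"Jason"}.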
+func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + // grab the indirected value of arg + var v reflect.Value + for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { + v = v.Elem() + } + + err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { + if len(t) == 0 { + return fmt.Errorf("could not find name %s in %#v", names[i], arg) + } + + val := reflectx.FieldByIndexesReadOnly(v, t) + arglist = append(arglist, val.Interface()) + + return nil + }) + + return arglist, err +} + +// like bindArgs, but for maps. +func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + for _, name := range names { + val, ok := arg[name] + if !ok { + return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) + } + arglist = append(arglist, val) + } + return arglist, nil +} + +// bindStruct binds a named parameter query with fields from a struct argument. +// The rules for binding field names to parameter names follow the same +// conventions as for StructScan, including obeying the `db` struct tags. +func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindAnyArgs(names, arg, m) + if err != nil { + return "", []interface{}{}, err + } + + return bound, arglist, nil +} + +var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`) + +func findMatchingClosingBracketIndex(s string) int { + count := 0 + for i, ch := range s { + if ch == '(' { + count++ + } + if ch == ')' { + count-- + if count == 0 { + return i + } + } + } + return 0 +} + +func fixBound(bound string, loop int) string { + loc := valuesReg.FindStringIndex(bound) + // defensive guard when "VALUES (...)" not found + if len(loc) < 2 { + return bound + } + + openingBracketIndex := loc[1] - 1 + index := findMatchingClosingBracketIndex(bound[openingBracketIndex:]) + // defensive guard. must have closing bracket + if index == 0 { + return bound + } + closingBracketIndex := openingBracketIndex + index + 1 + + var buffer bytes.Buffer + + buffer.WriteString(bound[0:closingBracketIndex]) + for i := 0; i < loop-1; i++ { + buffer.WriteString(",") + buffer.WriteString(bound[openingBracketIndex:closingBracketIndex]) + } + buffer.WriteString(bound[closingBracketIndex:]) + return buffer.String() +} + +// bindArray binds a named parameter query with fields from an array or slice of +// structs argument. +func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + // do the initial binding with QUESTION; if bindType is not question, + // we can rebind it at the end. + bound, names, err := compileNamedQuery([]byte(query), QUESTION) + if err != nil { + return "", []interface{}{}, err + } + arrayValue := reflect.ValueOf(arg) + arrayLen := arrayValue.Len() + if arrayLen == 0 { + return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg) + } + var arglist = make([]interface{}, 0, len(names)*arrayLen) + for i := 0; i < arrayLen; i++ { + elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m) + if err != nil { + return "", []interface{}{}, err + } + arglist = append(arglist, elemArglist...) 
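+ // Arguments from every element are flattened into a single list in + // element order, matching the repeated VALUES groups that fixBound + // appends when arrayLen > 1.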
+ } + if arrayLen > 1 { + bound = fixBound(bound, arrayLen) + } + // adjust binding type if we weren't on question + if bindType != QUESTION { + bound = Rebind(bindType, bound) + } + return bound, arglist, nil +} + +// bindMap binds a named parameter query with a map of arguments. +func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindMapArgs(names, args) + return bound, arglist, err +} + +// -- Compilation of Named Queries + +// Allow digits and letters in bind params; additionally runes are +// checked against underscores, meaning that bind params can be +// alphanumeric with underscores. Mind the difference between unicode +// digits and numbers, where '5' is a digit but '五' is not. +var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} + +// FIXME: this function isn't safe for unicode named params, as a failing test +// can testify. This is not a regression but a failure of the original code +// as well. It should be modified to range over runes in a string rather than +// bytes, even though this is less convenient and slower. Hopefully the +// addition of the prepared NamedStmt (which will only do this once) will make +// up for the slightly slower ad-hoc NamedExec/NamedQuery. + +// compile a NamedQuery into an unbound query (using the '?' bindvar) and +// a list of names. +func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { + names = make([]string, 0, 10) + rebound := make([]byte, 0, len(qs)) + + inName := false + last := len(qs) - 1 + currentVar := 1 + name := make([]byte, 0, 10) + + for i, b := range qs { + // a ':' while we're in a name is an error + if b == ':' { + // if this is the second ':' in a '::' escape sequence, append a ':' + if inName && i > 0 && qs[i-1] == ':' { + rebound = append(rebound, ':') + inName = false + continue + } else if inName { + err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) + return query, names, err + } + inName = true + name = []byte{} + } else if inName && i > 0 && b == '=' && len(name) == 0 { + rebound = append(rebound, ':', '=') + inName = false + continue + // if we're in a name, and this is an allowed character, continue + } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { + // append the byte to the name if we are in a name and not on the last byte + name = append(name, b) + // if we're in a name and it's not an allowed character, the name is done + } else if inName { + inName = false + // if this is the final byte of the string and it is part of the name, then + // make sure to add it to the name + if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { + name = append(name, b) + } + // add the string representation to the names list + names = append(names, string(name)) + // add a proper bindvar for the bindType + switch bindType { + // oracle only supports named type bind vars even for positional + case NAMED: + rebound = append(rebound, ':') + rebound = append(rebound, name...)
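+ // e.g. ":name" stays ":name" for Oracle-style drivers.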
+ case QUESTION, UNKNOWN: + rebound = append(rebound, '?') + case DOLLAR: + rebound = append(rebound, '$') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + case AT: + rebound = append(rebound, '@', 'p') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + } + // add this byte to string unless it was not part of the name + if i != last { + rebound = append(rebound, b) + } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { + rebound = append(rebound, b) + } + } else { + // this is a normal byte and should just go onto the rebound query + rebound = append(rebound, b) + } + } + + return string(rebound), names, err +} + +// BindNamed binds a struct or a map to a query with named parameters. +// DEPRECATED: use `sqlx.Named` instead of this; it may be removed in the future. +func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(bindType, query, arg, mapper()) +} + +// Named takes a query using named parameters and an argument and +// returns a new query with a list of args that can be executed by +// a database. The return value uses the `?` bindvar. +func Named(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(QUESTION, query, arg, mapper()) +} + +func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + t := reflect.TypeOf(arg) + k := t.Kind() + switch { + case k == reflect.Map && t.Key().Kind() == reflect.String: + m, ok := convertMapStringInterface(arg) + if !ok { + return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg) + } + return bindMap(bindType, query, m) + case k == reflect.Array || k == reflect.Slice: + return bindArray(bindType, query, arg, m) + default: + return bindStruct(bindType, query, arg, m) + } +} + +// NamedQuery binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. +func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Queryx(q, args...) +} + +// NamedExec uses BindStruct to get a query executable by the driver and +// then runs Exec on the result. Returns an error from the binding +// or the query execution itself. +func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Exec(q, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go new file mode 100644 index 000000000000..9ad23f4ed18c --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_context.go @@ -0,0 +1,133 @@ +//go:build go1.8 +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" +) + +// A union interface of contextPreparer and binder, required to be able to +// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+ PreparerContext
+ binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := PreparexContext(ctx, p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+ res, err := n.ExecContext(ctx, arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+ r, err := n.QueryContext(ctx, arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext using this NamedStmt. Because of limitations with QueryRow,
+// this is an alias for QueryRowContext.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+ return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ rows, err := n.QueryxContext(ctx, arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
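+// A minimal sketch (Person, ctx, and stmt are illustrative):
+//
+//	var p Person
+//	err := stmt.GetContext(ctx, &p, map[string]interface{}{"id": 1})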
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ r := n.QueryRowxContext(ctx, arg)
+ return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided ExtContext (sqlx.Tx, sqlx.DB). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses bindNamedMapper to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.ExecContext(ctx, q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 000000000000..f01d3d1f084a
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* support user-specified name -> field mapping functions
+
+These behaviors mimic the behavior of the standard library marshallers and also
+the behavior of standard Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 000000000000..8ec6a138283a
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,443 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+package reflectx
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+ Index []int
+ Path string
+ Field reflect.StructField
+ Zero reflect.Value
+ Name string
+ Options map[string]string
+ Embedded bool
+ Children []*FieldInfo
+ Parent *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+ Tree *FieldInfo
+ Index []*FieldInfo
+ Paths map[string]*FieldInfo
+ Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+ return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+ if len(index) == 0 {
+ return nil
+ }
+
+ tree := f.Tree
+ for _, i := range index {
+ if i >= len(tree.Children) || tree.Children[i] == nil {
+ return nil
+ }
+ tree = tree.Children[i]
+ }
+ return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+ cache map[reflect.Type]*StructMap
+ tagName string
+ tagMapFunc func(string) string
+ mapFunc func(string) string
+ mutex sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ }
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: mapFunc,
+ tagMapFunc: tagMapFunc,
+ }
+}
+
+// NewMapperFunc returns a new mapper which optionally obeys a field tag and
+// a struct field name mapper func given by f. Tags will take precedence, but
+// for any other field, the mapped name will be f(field.Name)
+func NewMapperFunc(tagName string, f func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: f,
+ }
+}
+
+// TypeMap returns a mapping of field strings to int slices representing
+// the traversal down the struct to reach the field.
+func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
+ m.mutex.Lock()
+ mapping, ok := m.cache[t]
+ if !ok {
+ mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
+ m.cache[t] = mapping
+ }
+ m.mutex.Unlock()
+ return mapping
+}
+
+// FieldMap returns the mapper's mapping of field names to reflect values. Panics
+// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
+func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ r := map[string]reflect.Value{}
+ tm := m.TypeMap(v.Type())
+ for tagName, fi := range tm.Names {
+ r[tagName] = FieldByIndexes(v, fi.Index)
+ }
+ return r
+}
+
+// FieldByName returns a field by its mapped name as a reflect.Value.
+// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
+// If the name is not found, v itself is returned rather than a zero Value.
+func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ fi, ok := tm.Names[name]
+ if !ok {
+ return v
+ }
+ return FieldByIndexes(v, fi.Index)
+}
+
+// FieldsByName returns a slice of values corresponding to the slice of names
+// for the value. Panics if v's Kind is not Struct or v is not Indirectable
+// to a struct Kind. Returns zero Value for each name not found.
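+// For example, m.FieldsByName(v, []string{"name", "age"}) returns the two
+// mapped field values in that order, with a zero Value in place of any name
+// that is not found.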
+func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ vals := make([]reflect.Value, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ vals = append(vals, *new(reflect.Value))
+ } else {
+ vals = append(vals, FieldByIndexes(v, fi.Index))
+ }
+ }
+ return vals
+}
+
+// TraversalsByName returns a slice of int slices which represent the struct
+// traversals for each mapped name. Panics if t is not a struct or Indirectable
+// to a struct. Returns empty int slice for each name not found.
+func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ r := make([][]int, 0, len(names))
+ m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
+ if i == nil {
+ r = append(r, []int{})
+ } else {
+ r = append(r, i)
+ }
+
+ return nil
+ })
+ return r
+}
+
+// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
+// each name and the struct traversal represented by that name. Panics if t is not
+// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
+ t = Deref(t)
+ mustBe(t, reflect.Struct)
+ tm := m.TypeMap(t)
+ for i, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ } else {
+ if err := fn(i, fi.Index); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ // if this is a pointer and it's nil, allocate a new value and set it
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ alloc := reflect.New(Deref(v.Type()))
+ v.Set(alloc)
+ }
+ if v.Kind() == reflect.Map && v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ }
+ return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ }
+ return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+ Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+ if k := v.Kind(); k != expected {
+ panic(&reflect.ValueError{Method: methodName(), Kind: k})
+ }
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+type typeQueue struct {
+ t reflect.Type
+ fi *FieldInfo
+ pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
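+// For example, apnd([]int{0, 2}, 5) returns a new []int{0, 2, 5} and leaves
+// the input slice untouched.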
+func apnd(is []int, i int) []int { + x := make([]int, len(is)+1) + copy(x, is) + x[len(x)-1] = i + return x +} + +type mapf func(string) string + +// parseName parses the tag and the target name for the given field using +// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the +// field's name to a target name, and tagMapFunc for mapping the tag to +// a target name. +func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { + // first, set the fieldName to the field's name + fieldName = field.Name + // if a mapFunc is set, use that to override the fieldName + if mapFunc != nil { + fieldName = mapFunc(fieldName) + } + + // if there's no tag to look for, return the field name + if tagName == "" { + return "", fieldName + } + + // if this tag is not set using the normal convention in the tag, + // then return the fieldname.. this check is done because according + // to the reflect documentation: + // If the tag does not have the conventional format, + // the value returned by Get is unspecified. + // which doesn't sound great. + if !strings.Contains(string(field.Tag), tagName+":") { + return "", fieldName + } + + // at this point we're fairly sure that we have a tag, so lets pull it out + tag = field.Tag.Get(tagName) + + // if we have a mapper function, call it on the whole tag + // XXX: this is a change from the old version, which pulled out the name + // before the tagMapFunc could be run, but I think this is the right way + if tagMapFunc != nil { + tag = tagMapFunc(tag) + } + + // finally, split the options from the name + parts := strings.Split(tag, ",") + fieldName = parts[0] + + return tag, fieldName +} + +// parseOptions parses options out of a tag string, skipping the name +func parseOptions(tag string) map[string]string { + parts := strings.Split(tag, ",") + options := make(map[string]string, len(parts)) + if len(parts) > 1 { + for _, opt := range parts[1:] { + // short circuit potentially expensive split op + if strings.Contains(opt, "=") { + kv := strings.Split(opt, "=") + options[kv[0]] = kv[1] + continue + } + options[opt] = "" + } + } + return options +} + +// getMapping returns a mapping for the t type, using the tagName, mapFunc and +// tagMapFunc to determine the canonical names of fields. +func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." 
+ fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + // check if nothing has already been pushed with the same path + // sometimes you can choose to override a type using embedded struct + fld, ok := flds.Paths[fi.Path] + if !ok || fld.Embedded { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 000000000000..8259a4feb694 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1054 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. +var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. +func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. 
Something is scannable if: +// - it is not a struct +// - it implements sql.Scanner +// - it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + return len(mapper().TypeMap(t).Index) == 0 +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i := i.(type) { + case DB: + return i.Mapper + case *DB: + return i.Mapper + case Tx: + return i.Mapper + case *Tx: + return i.Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +//lint:ignore U1000 ignoring this for now +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. 
But the TODO is that + // for a lot of drivers, this copy will be unnecessary. We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. +type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +// +//lint:ignore ST1003 changing this would break the package interface. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. +func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. 
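+// For example, with a driver whose bind type is DOLLAR (e.g. postgres),
+//
+//	q, args, err := db.BindNamed("SELECT * FROM t WHERE id=:id", map[string]interface{}{"id": 1})
+//
+// yields q == "SELECT * FROM t WHERE id=$1" and args == []interface{}{1}.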
+func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Conn is a wrapper around sql.Conn with extra functionality +type Conn struct { + *sql.Conn + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. +func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. 
+func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. +func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. +func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. 
+func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. *Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. 
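+// A typical loop (Person is illustrative):
+//
+//	rows, err := db.Queryx("SELECT * FROM person")
+//	if err != nil { ... }
+//	defer rows.Close()
+//	for rows.Next() {
+//		var p Person
+//		if err := rows.StructScan(&p); err != nil { ... }
+//	}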
+func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get does a QueryRow using the provided Queryer, and scans the resulting row +// to dest. If dest is scannable, the result must only have one column. Otherwise, +// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowx(query, args...) + return r.scanAny(dest, false) +} + +// LoadFile exec's every statement in a file (as a single call to Exec). +// LoadFile may return a nil *sql.Result if errors are encountered locating or +// reading the file at path. LoadFile reads the entire file into memory, so it +// is not suitable for loading large data dumps, but can be useful for initializing +// schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. 
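+// For example (the path is illustrative): res, err := LoadFile(db, "schema.sql")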
+func LoadFile(e Execer, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.Exec(string(contents)) + return &res, err +} + +// MustExec execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExec(e Execer, query string, args ...interface{}) sql.Result { + res, err := e.Exec(query, args...) + if err != nil { + panic(err) + } + return res +} + +// SliceScan using this Rows. +func (r *Row) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Row) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +func (r *Row) scanAny(dest interface{}, structOnly bool) error { + if r.err != nil { + return r.err + } + if r.rows == nil { + r.err = sql.ErrNoRows + return r.err + } + defer r.rows.Close() + + v := reflect.ValueOf(dest) + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if v.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + + base := reflectx.Deref(v.Type()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := r.Columns() + if err != nil { + return err + } + + if scannable && len(columns) > 1 { + return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) + } + + if scannable { + return r.Scan(dest) + } + + m := r.Mapper + + fields := m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values := make([]interface{}, len(columns)) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + return r.Scan(values...) +} + +// StructScan a single Row into dest. +func (r *Row) StructScan(dest interface{}) error { + return r.scanAny(dest, true) +} + +// SliceScan a row, returning a []interface{} with values similar to MapScan. +// This function is primarily intended for use where the number of columns +// is not known. Because you can pass an []interface{} directly to Scan, +// it's recommended that you do that as it will not have to allocate new +// slices per row. +func SliceScan(r ColScanner) ([]interface{}, error) { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return []interface{}{}, err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + + if err != nil { + return values, err + } + + for i := range columns { + values[i] = *(values[i].(*interface{})) + } + + return values, r.Err() +} + +// MapScan scans a single Row into the dest map[string]interface{}. +// Use this to get results for SQL that might not be under your control +// (for instance, if you're building an interface for an SQL server that +// executes SQL from input). Please do not use this as a primary interface! +// This will modify the map sent to it in place, so reuse the same map with +// care. 
Columns which occur more than once in the result will overwrite +// each other! +func MapScan(r ColScanner, dest map[string]interface{}) error { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + if err != nil { + return err + } + + for i, column := range columns { + dest[column] = *(values[i].(*interface{})) + } + + return r.Err() +} + +type rowsi interface { + Close() error + Columns() ([]string, error) + Err() error + Next() bool + Scan(...interface{}) error +} + +// structOnlyError returns an error appropriate for type when a non-scannable +// struct is expected but something else is given +func structOnlyError(t reflect.Type) error { + isStruct := t.Kind() == reflect.Struct + isScanner := reflect.PtrTo(t).Implements(_scannerInterface) + if !isStruct { + return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) + } + if isScanner { + return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) + } + return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) +} + +// scanAll scans all rows into a destination, which must be a slice of any +// type. It resets the slice length to zero before appending each element to +// the slice. If the destination slice type is a Struct, then StructScan will +// be used on each row. If the destination is some other kind of base type, +// then each row must only have one column which can scan into that type. This +// allows you to do something like: +// +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) +// +// and ids will be a list of the id results. I realize that this is a desirable +// interface to expose to users, but for now it will only be exposed via changes +// to `Get` and `Select`. The reason that this has been implemented like this is +// this is the only way to not duplicate reflect work in the new API while +// maintaining backwards compatibility. 
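+// Note that dest may point to a slice of structs or a slice of pointers to
+// structs; the isPtr check below appends values or pointers accordingly.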
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + direct.SetLen(0) + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows := rows.(type) { + case *Rows: + m = rows.Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) + if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. 
+// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 000000000000..32621d56d72e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,415 @@ +//go:build go1.8 +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. +func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
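+// For example (the query is illustrative):
+//
+//	stmt, err := PreparexContext(ctx, db, "SELECT * FROM person WHERE id=?")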
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Connx returns an *sqlx.Conn instead of an *sql.Conn. +func (db *DB) Connx(ctx context.Context) (*Conn, error) { + conn, err := db.DB.Conn(ctx) + if err != nil { + return nil, err + } + + return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. 
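+//
+// A sketch of the usual commit/rollback pattern (illustrative only; assumes a
+// *sqlx.Conn named conn and a place table):
+//
+//	tx, err := conn.BeginTxx(ctx, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback() // safe to call even after a successful Commit
+//	if _, err := tx.ExecContext(ctx, "UPDATE place SET telcode = $1 WHERE telcode = $2", 853, 852); err != nil {
+//		return err
+//	}
+//	return tx.Commit()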
+func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := c.Conn.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// SelectContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, c, dest, query, args...) +} + +// GetContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, c, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, c, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := c.Conn.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := c.Conn.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper} +} + +// Rebind a query within a Conn's bindvar type. +func (c *Conn) Rebind(query string) string { + return Rebind(BindType(c.driverName), query) +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. +func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. 
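+//
+// Because it panics on error, it is best suited to tests and fixtures; a
+// sketch (illustrative only, assuming a place table):
+//
+//	tx := db.MustBeginTx(ctx, nil)
+//	tx.MustExecContext(ctx, "INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", 852)
+//	if err := tx.Commit(); err != nil {
+//		panic(err)
+//	}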
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) +} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) +} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) 
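+ // the query argument is deliberately ignored: a qStmt wraps an
+ // already-prepared statement that carries its own query text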
+ if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := q.Stmt.QueryContext(ctx, args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.ExecContext(ctx, args...) +} diff --git a/vendor/github.com/lann/builder/.gitignore b/vendor/github.com/lann/builder/.gitignore new file mode 100644 index 000000000000..f54eb28f6edd --- /dev/null +++ b/vendor/github.com/lann/builder/.gitignore @@ -0,0 +1,2 @@ +*~ +\#*# diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml new file mode 100644 index 000000000000..c8860f69bc72 --- /dev/null +++ b/vendor/github.com/lann/builder/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - '1.8' + - '1.9' + - '1.10' + - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 000000000000..a109e8051c16 --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/README.md b/vendor/github.com/lann/builder/README.md new file mode 100644 index 000000000000..3b18550bb970 --- /dev/null +++ b/vendor/github.com/lann/builder/README.md @@ -0,0 +1,68 @@ +# Builder - fluent immutable builders for Go + +[![GoDoc](https://godoc.org/github.com/lann/builder?status.png)](https://godoc.org/github.com/lann/builder) +[![Build Status](https://travis-ci.org/lann/builder.png?branch=master)](https://travis-ci.org/lann/builder) + +Builder was originally written for +[Squirrel](https://github.com/lann/squirrel), a fluent SQL generator. It +is probably the best example of Builder in action. + +Builder helps you write **fluent** DSLs for your libraries with method chaining: + +```go +resp := ReqBuilder. + Url("http://golang.org"). + Header("User-Agent", "Builder"). 
+ Get()
+```
+
+Builder uses **immutable** persistent data structures
+([these](https://github.com/mndrix/ps), specifically)
+so that each step in your method chain can be reused:
+
+```go
+build := WordBuilder.AddLetters("Build")
+builder := build.AddLetters("er")
+building := build.AddLetters("ing")
+```
+
+Builder makes it easy to **build** structs using the **builder** pattern
+(*surprise!*):
+
+```go
+import "github.com/lann/builder"
+
+type Muppet struct {
+ Name string
+ Friends []string
+}
+
+type muppetBuilder builder.Builder
+
+func (b muppetBuilder) Name(name string) muppetBuilder {
+ return builder.Set(b, "Name", name).(muppetBuilder)
+}
+
+func (b muppetBuilder) AddFriend(friend string) muppetBuilder {
+ return builder.Append(b, "Friends", friend).(muppetBuilder)
+}
+
+func (b muppetBuilder) Build() Muppet {
+ return builder.GetStruct(b).(Muppet)
+}
+
+var MuppetBuilder = builder.Register(muppetBuilder{}, Muppet{}).(muppetBuilder)
+```
+```go
+MuppetBuilder.
+ Name("Beaker").
+ AddFriend("Dr. Honeydew").
+ Build()
+
+=> Muppet{Name:"Beaker", Friends:[]string{"Dr. Honeydew"}}
+```
+
+## License
+
+Builder is released under the
+[MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/lann/builder/builder.go b/vendor/github.com/lann/builder/builder.go
new file mode 100644
index 000000000000..ff621406a4f1
--- /dev/null
+++ b/vendor/github.com/lann/builder/builder.go
@@ -0,0 +1,225 @@
+// Package builder provides a method for writing fluent immutable builders.
+package builder
+
+import (
+ "github.com/lann/ps"
+ "go/ast"
+ "reflect"
+)
+
+// Builder stores a set of named values.
+//
+// New types can be declared with underlying type Builder and used with the
+// functions in this package. See example.
+//
+// Instances of Builder should be treated as immutable. It is up to the
+// implementor to ensure mutable values set on a Builder are not mutated while
+// the Builder is in use.
+type Builder struct {
+ builderMap ps.Map
+}
+
+var (
+ EmptyBuilder = Builder{ps.NewMap()}
+ emptyBuilderValue = reflect.ValueOf(EmptyBuilder)
+)
+
+func getBuilderMap(builder interface{}) ps.Map {
+ b := convert(builder, Builder{}).(Builder)
+
+ if b.builderMap == nil {
+ return ps.NewMap()
+ }
+
+ return b.builderMap
+}
+
+// Set returns a copy of the given builder with a new value set for the given
+// name.
+//
+// Set (and all other functions taking a builder in this package) will panic if
+// the given builder's underlying type is not Builder.
+func Set(builder interface{}, name string, v interface{}) interface{} {
+ b := Builder{getBuilderMap(builder).Set(name, v)}
+ return convert(b, builder)
+}
+
+// Delete returns a copy of the given builder with the given named value unset.
+func Delete(builder interface{}, name string) interface{} {
+ b := Builder{getBuilderMap(builder).Delete(name)}
+ return convert(b, builder)
+}
+
+// Append returns a copy of the given builder with new value(s) appended to the
+// named list. If the value was previously unset or set with Set (even to,
+// e.g., a slice value), the new value(s) will be appended to an empty list.
+func Append(builder interface{}, name string, vs ...interface{}) interface{} {
+ return Extend(builder, name, vs)
+}
+
+// Extend behaves like Append, except it takes a single slice or array value
+// which will be concatenated to the named list.
+//
+// Unlike a variadic call to Append - which requires a []interface{} value -
+// Extend accepts slices or arrays of any type.
+//
+// Extend will panic if the given value is not a slice, array, or nil.
+func Extend(builder interface{}, name string, vs interface{}) interface{} {
+ if vs == nil {
+ return builder
+ }
+
+ maybeList, ok := getBuilderMap(builder).Lookup(name)
+
+ var list ps.List
+ if ok {
+ list, ok = maybeList.(ps.List)
+ }
+ if !ok {
+ list = ps.NewList()
+ }
+
+ forEach(vs, func(v interface{}) {
+ list = list.Cons(v)
+ })
+
+ return Set(builder, name, list)
+}
+
+func listToSlice(list ps.List, arrayType reflect.Type) reflect.Value {
+ size := list.Size()
+ slice := reflect.MakeSlice(arrayType, size, size)
+ for i := size - 1; i >= 0; i-- {
+ val := reflect.ValueOf(list.Head())
+ slice.Index(i).Set(val)
+ list = list.Tail()
+ }
+ return slice
+}
+
+var anyArrayType = reflect.TypeOf([]interface{}{})
+
+// Get retrieves a single named value from the given builder.
+// If the value has not been set, it returns (nil, false). Otherwise, it will
+// return (value, true).
+//
+// If the named value was last set with Append or Extend, the returned value
+// will be a slice. If the given Builder has been registered with Register or
+// RegisterType and the given name is an exported field of the registered
+// struct, the returned slice will have the same type as that field. Otherwise
+// the slice will have type []interface{}. It will panic if the given name is a
+// registered struct's exported field and the value set on the Builder is not
+// assignable to the field.
+func Get(builder interface{}, name string) (interface{}, bool) {
+ val, ok := getBuilderMap(builder).Lookup(name)
+ if !ok {
+ return nil, false
+ }
+
+ list, isList := val.(ps.List)
+ if isList {
+ arrayType := anyArrayType
+
+ if ast.IsExported(name) {
+ structType := getBuilderStructType(reflect.TypeOf(builder))
+ if structType != nil {
+ field, ok := (*structType).FieldByName(name)
+ if ok {
+ arrayType = field.Type
+ }
+ }
+ }
+
+ val = listToSlice(list, arrayType).Interface()
+ }
+
+ return val, true
+}
+
+// GetMap returns a map[string]interface{} of the values set in the given
+// builder.
+//
+// See notes on Get regarding returned slices.
+func GetMap(builder interface{}) map[string]interface{} {
+ m := getBuilderMap(builder)
+ structType := getBuilderStructType(reflect.TypeOf(builder))
+
+ ret := make(map[string]interface{}, m.Size())
+
+ m.ForEach(func(name string, val ps.Any) {
+ list, isList := val.(ps.List)
+ if isList {
+ arrayType := anyArrayType
+
+ if structType != nil {
+ field, ok := (*structType).FieldByName(name)
+ if ok {
+ arrayType = field.Type
+ }
+ }
+
+ val = listToSlice(list, arrayType).Interface()
+ }
+
+ ret[name] = val
+ })
+
+ return ret
+}
+
+// GetStruct builds a new struct from the given registered builder.
+// It will return nil if the given builder's type has not been registered with
+// Register or RegisterType.
+//
+// All values set on the builder with names that start with an uppercase letter
+// (i.e. which would be exported if they were identifiers) are assigned to the
+// corresponding exported fields of the struct.
+//
+// GetStruct will panic if any of these "exported" values are not assignable to
+// their corresponding struct fields.
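+//
+// A minimal sketch (illustrative only; MuppetBuilder and Muppet are the
+// hypothetical types from the README above):
+//
+//	b := MuppetBuilder.Name("Beaker")
+//	m := builder.GetStruct(b).(Muppet)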
+func GetStruct(builder interface{}) interface{} {
+ structVal := newBuilderStruct(reflect.TypeOf(builder))
+ if structVal == nil {
+ return nil
+ }
+ return scanStruct(builder, structVal)
+}
+
+// GetStructLike builds a new struct from the given builder with the same type
+// as the given struct.
+//
+// All values set on the builder with names that start with an uppercase letter
+// (i.e. which would be exported if they were identifiers) are assigned to the
+// corresponding exported fields of the struct.
+//
+// GetStructLike will panic if any of these "exported" values are not assignable to
+// their corresponding struct fields.
+func GetStructLike(builder interface{}, strct interface{}) interface{} {
+ structVal := reflect.New(reflect.TypeOf(strct)).Elem()
+ return scanStruct(builder, &structVal)
+}
+
+func scanStruct(builder interface{}, structVal *reflect.Value) interface{} {
+ getBuilderMap(builder).ForEach(func(name string, val ps.Any) {
+ if ast.IsExported(name) {
+ field := structVal.FieldByName(name)
+
+ var value reflect.Value
+ switch v := val.(type) {
+ case nil:
+ switch field.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ value = reflect.Zero(field.Type())
+ }
+ // nil is not valid for this Type; Set will panic
+ case ps.List:
+ value = listToSlice(v, field.Type())
+ default:
+ value = reflect.ValueOf(val)
+ }
+ field.Set(value)
+ }
+ })
+
+ return structVal.Interface()
+}
diff --git a/vendor/github.com/lann/builder/reflect.go b/vendor/github.com/lann/builder/reflect.go
new file mode 100644
index 000000000000..3b236e791833
--- /dev/null
+++ b/vendor/github.com/lann/builder/reflect.go
@@ -0,0 +1,24 @@
+package builder
+
+import "reflect"
+
+func convert(from interface{}, to interface{}) interface{} {
+ return reflect.
+ ValueOf(from).
+ Convert(reflect.TypeOf(to)).
+ Interface()
+}
+
+func forEach(s interface{}, f func(interface{})) {
+ val := reflect.ValueOf(s)
+
+ kind := val.Kind()
+ if kind != reflect.Slice && kind != reflect.Array {
+ panic(&reflect.ValueError{Method: "builder.forEach", Kind: kind})
+ }
+
+ l := val.Len()
+ for i := 0; i < l; i++ {
+ f(val.Index(i).Interface())
+ }
+}
diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go
new file mode 100644
index 000000000000..612845418e29
--- /dev/null
+++ b/vendor/github.com/lann/builder/registry.go
@@ -0,0 +1,59 @@
+package builder
+
+import (
+ "reflect"
+ "sync"
+)
+
+var (
+ registry = make(map[reflect.Type]reflect.Type)
+ registryMux sync.RWMutex
+)
+
+// RegisterType maps the given builderType to a structType.
+// This mapping affects the type of slices returned by Get and is required for
+// GetStruct to work.
+//
+// Returns a Value containing an empty instance of the registered builderType.
+//
+// RegisterType will panic if builderType's underlying type is not Builder or
+// if structType's Kind is not Struct.
+func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value {
+ registryMux.Lock()
+ defer registryMux.Unlock()
+ structType.NumField() // Panic if structType is not a struct
+ registry[builderType] = structType
+ emptyValue := emptyBuilderValue.Convert(builderType)
+ return &emptyValue
+}
+
+// Register wraps RegisterType, taking instances instead of Types.
+//
+// Returns an empty instance of the registered builder type which can be used
+// as the initial value for builder expressions. See example.
+func Register(builderProto, structProto interface{}) interface{} {
+ empty := RegisterType(
+ reflect.TypeOf(builderProto),
+ reflect.TypeOf(structProto),
+ ).Interface()
+ return empty
+}
+
+func getBuilderStructType(builderType reflect.Type) *reflect.Type {
+ registryMux.RLock()
+ defer registryMux.RUnlock()
+ structType, ok := registry[builderType]
+ if !ok {
+ return nil
+ }
+ return &structType
+}
+
+func newBuilderStruct(builderType reflect.Type) *reflect.Value {
+ structType := getBuilderStructType(builderType)
+ if structType == nil {
+ return nil
+ }
+ newStruct := reflect.New(*structType).Elem()
+ return &newStruct
+}
diff --git a/vendor/github.com/lann/ps/LICENSE b/vendor/github.com/lann/ps/LICENSE
new file mode 100644
index 000000000000..69f9ae8d5ab8
--- /dev/null
+++ b/vendor/github.com/lann/ps/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2013 Michael Hendricks
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lann/ps/README.md b/vendor/github.com/lann/ps/README.md
new file mode 100644
index 000000000000..325a5b354086
--- /dev/null
+++ b/vendor/github.com/lann/ps/README.md
@@ -0,0 +1,10 @@
+**This is a stable fork of https://github.com/mndrix/ps; it will not introduce breaking changes.**
+
+ps
+==
+
+Persistent data structures for Go. See the [full package documentation](http://godoc.org/github.com/lann/ps)
+
+Install with
+
+ go get github.com/lann/ps
diff --git a/vendor/github.com/lann/ps/list.go b/vendor/github.com/lann/ps/list.go
new file mode 100644
index 000000000000..48a1cebacf6a
--- /dev/null
+++ b/vendor/github.com/lann/ps/list.go
@@ -0,0 +1,93 @@
+package ps
+
+// List is a persistent list of possibly heterogeneous values.
+type List interface {
+ // IsNil returns true if the list is empty
+ IsNil() bool
+
+ // Cons returns a new list with val as the head
+ Cons(val Any) List
+
+ // Head returns the first element of the list;
+ // panics if the list is empty
+ Head() Any
+
+ // Tail returns a list with all elements except the head;
+ // panics if the list is empty
+ Tail() List
+
+ // Size returns the list's length. This takes O(1) time.
+ Size() int
+
+ // ForEach executes a callback for each value in the list.
+ ForEach(f func(Any))
+
+ // Reverse returns a list whose elements are in the opposite order as
+ // the original list.
+ Reverse() List
+}
+
+// Immutable (i.e. persistent) list
+type list struct {
+ depth int // the number of nodes after, and including, this one
+ value Any
+ tail *list
+}
+
+// An empty list shared by all lists
+var nilList = &list{}
+
+// NewList returns a new, empty list. The result is a singly linked
+// list implementation. All lists share an empty tail, so allocating
+// empty lists is efficient in time and memory.
+func NewList() List {
+ return nilList
+}
+
+func (self *list) IsNil() bool {
+ return self == nilList
+}
+
+func (self *list) Size() int {
+ return self.depth
+}
+
+func (tail *list) Cons(val Any) List {
+ var xs list
+ xs.depth = tail.depth + 1
+ xs.value = val
+ xs.tail = tail
+ return &xs
+}
+
+func (self *list) Head() Any {
+ if self.IsNil() {
+ panic("Called Head() on an empty list")
+ }
+
+ return self.value
+}
+
+func (self *list) Tail() List {
+ if self.IsNil() {
+ panic("Called Tail() on an empty list")
+ }
+
+ return self.tail
+}
+
+// ForEach executes a callback for each value in the list
+func (self *list) ForEach(f func(Any)) {
+ if self.IsNil() {
+ return
+ }
+ f(self.Head())
+ self.Tail().ForEach(f)
+}
+
+// Reverse returns a list with elements in opposite order as this list
+func (self *list) Reverse() List {
+ reversed := NewList()
+ self.ForEach(func(v Any) { reversed = reversed.Cons(v) })
+ return reversed
+}
diff --git a/vendor/github.com/lann/ps/map.go b/vendor/github.com/lann/ps/map.go
new file mode 100644
index 000000000000..192daec8f38a
--- /dev/null
+++ b/vendor/github.com/lann/ps/map.go
@@ -0,0 +1,311 @@
+// Fully persistent data structures. A persistent data structure is a data
+// structure that always preserves the previous version of itself when
+// it is modified. Such data structures are effectively immutable,
+// as their operations do not update the structure in-place, but instead
+// always yield a new structure.
+//
+// Persistent data structures typically share structure among themselves.
+// This allows operations to avoid copying the entire data structure.
+package ps
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Any is a shorthand for Go's verbose interface{} type.
+type Any interface{}
+
+// A Map associates unique keys (type string) with values (type Any).
+type Map interface {
+ // IsNil returns true if the Map is empty
+ IsNil() bool
+
+ // Set returns a new map in which key and value are associated.
+ // If the key didn't exist before, it's created; otherwise, the
+ // associated value is changed.
+ // This operation is O(log N) in the number of keys.
+ Set(key string, value Any) Map
+
+ // Delete returns a new map with the association for key, if any, removed.
+ // This operation is O(log N) in the number of keys.
+ Delete(key string) Map
+
+ // Lookup returns the value associated with a key, if any. If the key
+ // exists, the second return value is true; otherwise, false.
+ // This operation is O(log N) in the number of keys.
+ Lookup(key string) (Any, bool)
+
+ // Size returns the number of key value pairs in the map.
+ // This takes O(1) time.
+ Size() int
+
+ // ForEach executes a callback on each key value pair in the map.
+ ForEach(f func(key string, val Any))
+
+ // Keys returns a slice with all keys in this map.
+ // This operation is O(N) in the number of keys.
+ Keys() []string
+
+ String() string
+}
+
+// Immutable (i.e. persistent) associative array
+const childCount = 8
+const shiftSize = 3
+
+type tree struct {
+ count int
+ hash uint64 // hash of the key (used for tree balancing)
+ key string
+ value Any
+ children [childCount]*tree
+}
+
+var nilMap = &tree{}
+
+// Recursively set nilMap's subtrees to point at itself.
+// This eliminates all nil pointers in the map structure.
+// All map nodes are created by cloning this structure so
+// they avoid the problem too.
+func init() {
+ for i := range nilMap.children {
+ nilMap.children[i] = nilMap
+ }
+}
+
+// NewMap allocates a new, persistent map from strings to values of
+// any type.
+// This is currently implemented as a path-copying tree with 8-way branching.
+func NewMap() Map {
+ return nilMap
+}
+
+func (self *tree) IsNil() bool {
+ return self == nilMap
+}
+
+// clone returns an exact duplicate of a tree node
+func (self *tree) clone() *tree {
+ var m tree
+ m = *self
+ return &m
+}
+
+// constants for FNV-1a hash algorithm
+const (
+ offset64 uint64 = 14695981039346656037
+ prime64 uint64 = 1099511628211
+)
+
+// hashKey returns a hash code for a given string
+func hashKey(key string) uint64 {
+ hash := offset64
+ for _, codepoint := range key {
+ hash ^= uint64(codepoint)
+ hash *= prime64
+ }
+ return hash
+}
+
+// Set returns a new map similar to this one but with key and value
+// associated. If the key didn't exist, it's created; otherwise, the
+// associated value is changed.
+func (self *tree) Set(key string, value Any) Map {
+ hash := hashKey(key)
+ return setLowLevel(self, hash, hash, key, value)
+}
+
+func setLowLevel(self *tree, partialHash, hash uint64, key string, value Any) *tree {
+ if self.IsNil() { // an empty tree is easy
+ m := self.clone()
+ m.count = 1
+ m.hash = hash
+ m.key = key
+ m.value = value
+ return m
+ }
+
+ if hash != self.hash {
+ m := self.clone()
+ i := partialHash % childCount
+ m.children[i] = setLowLevel(self.children[i], partialHash>>shiftSize, hash, key, value)
+ recalculateCount(m)
+ return m
+ }
+
+ // replacing a key's previous value
+ m := self.clone()
+ m.value = value
+ return m
+}
+
+// modifies a map by recalculating its key count based on the counts
+// of its subtrees
+func recalculateCount(m *tree) {
+ count := 0
+ for _, t := range m.children {
+ count += t.Size()
+ }
+ m.count = count + 1 // add one to count ourself
+}
+
+func (m *tree) Delete(key string) Map {
+ hash := hashKey(key)
+ newMap, _ := deleteLowLevel(m, hash, hash)
+ return newMap
+}
+
+func deleteLowLevel(self *tree, partialHash, hash uint64) (*tree, bool) {
+ // empty trees are easy
+ if self.IsNil() {
+ return self, false
+ }
+
+ if hash != self.hash {
+ i := partialHash % childCount
+ child, found := deleteLowLevel(self.children[i], partialHash>>shiftSize, hash)
+ if !found {
+ return self, false
+ }
+ newMap := self.clone()
+ newMap.children[i] = child
+ recalculateCount(newMap)
+ return newMap, true // ? this wasn't in the original code
+ }
+
+ // we must delete our own node
+ if self.isLeaf() { // we have no children
+ return nilMap, true
+ }
+ /*
+ if self.subtreeCount() == 1 { // only one subtree
+ for _, t := range self.children {
+ if t != nilMap {
+ return t, true
+ }
+ }
+ panic("Tree with 1 subtree actually had no subtrees")
+ }
+ */
+
+ // find a node to replace us
+ i := -1
+ size := -1
+ for j, t := range self.children {
+ if t.Size() > size {
+ i = j
+ size = t.Size()
+ }
+ }
+
+ // make chosen leaf smaller
+ replacement, child := self.children[i].deleteLeftmost()
+ newMap := replacement.clone()
+ for j := range self.children {
+ if j == i {
+ newMap.children[j] = child
+ } else {
+ newMap.children[j] = self.children[j]
+ }
+ }
+ recalculateCount(newMap)
+ return newMap, true
+}
+
+// delete the leftmost node in a tree returning the node that
+// was deleted and the tree left over after its deletion
+func (m *tree) deleteLeftmost() (*tree, *tree) {
+ if m.isLeaf() {
+ return m, nilMap
+ }
+
+ for i, t := range m.children {
+ if t != nilMap {
+ deleted, child := t.deleteLeftmost()
+ newMap := m.clone()
+ newMap.children[i] = child
+ recalculateCount(newMap)
+ return deleted, newMap
+ }
+ }
+ panic("Tree isn't a leaf but also had no children. How does that happen?")
+}
+
+// isLeaf returns true if this is a leaf node
+func (m *tree) isLeaf() bool {
+ return m.Size() == 1
+}
+
+// returns the number of child subtrees we have
+func (m *tree) subtreeCount() int {
+ count := 0
+ for _, t := range m.children {
+ if t != nilMap {
+ count++
+ }
+ }
+ return count
+}
+
+func (m *tree) Lookup(key string) (Any, bool) {
+ hash := hashKey(key)
+ return lookupLowLevel(m, hash, hash)
+}
+
+func lookupLowLevel(self *tree, partialHash, hash uint64) (Any, bool) {
+ if self.IsNil() { // an empty tree is easy
+ return nil, false
+ }
+
+ if hash != self.hash {
+ i := partialHash % childCount
+ return lookupLowLevel(self.children[i], partialHash>>shiftSize, hash)
+ }
+
+ // we found it
+ return self.value, true
+}
+
+func (m *tree) Size() int {
+ return m.count
+}
+
+func (m *tree) ForEach(f func(key string, val Any)) {
+ if m.IsNil() {
+ return
+ }
+
+ // ourself
+ f(m.key, m.value)
+
+ // children
+ for _, t := range m.children {
+ if t != nilMap {
+ t.ForEach(f)
+ }
+ }
+}
+
+func (m *tree) Keys() []string {
+ keys := make([]string, m.Size())
+ i := 0
+ m.ForEach(func(k string, v Any) {
+ keys[i] = k
+ i++
+ })
+ return keys
+}
+
+// make it easier to display maps for debugging
+func (m *tree) String() string {
+ keys := m.Keys()
+ buf := bytes.NewBufferString("{")
+ for _, key := range keys {
+ val, _ := m.Lookup(key)
+ fmt.Fprintf(buf, "%s: %s, ", key, val)
+ }
+ fmt.Fprintf(buf, "}\n")
+ return buf.String()
+}
diff --git a/vendor/github.com/lann/ps/profile.sh b/vendor/github.com/lann/ps/profile.sh
new file mode 100644
index 000000000000..f03df05a5aa9
--- /dev/null
+++ b/vendor/github.com/lann/ps/profile.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+go test -c
+./ps.test -test.run=none -test.bench=$2 -test.$1profile=$1.profile
diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore
new file mode 100644
index 000000000000..3243952a4d60
--- /dev/null
+++ b/vendor/github.com/lib/pq/.gitignore
@@ -0,0 +1,6 @@
+.db
+*.test
+*~
+*.swp
+.idea
+.vscode
\ No newline at end of file
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
new file mode 100644
index 000000000000..5773904a30e9
--- /dev/null
+++ b/vendor/github.com/lib/pq/LICENSE.md
@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
new file mode 100644
index 000000000000..126ee5d35d18
--- /dev/null
+++ b/vendor/github.com/lib/pq/README.md
@@ -0,0 +1,36 @@
+# pq - A pure Go postgres driver for Go's database/sql package
+
+[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc)
+
+## Install
+
+ go get github.com/lib/pq
+
+## Features
+
+* SSL
+* Handles bad connections for `database/sql`
+* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
+* Scan binary blobs correctly (i.e. `bytea`)
+* Package for `hstore` support
+* COPY FROM support
+* pq.ParseURL for converting urls to connection strings for sql.Open.
+* Many libpq compatible environment variables
+* Unix socket support
+* Notifications: `LISTEN`/`NOTIFY`
+* pgpass support
+* GSS (Kerberos) auth
+
+## Tests
+
+`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
+
+## Status
+
+This package is currently in maintenance mode, which means:
+1. It generally does not accept new features.
+2. It does accept bug fixes and version compatibility changes provided by the community.
+3. Maintainers usually do not resolve reported issues.
+4. Community members are encouraged to help each other with reported issues.
+
+For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development.
diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md
new file mode 100644
index 000000000000..f05021115be2
--- /dev/null
+++ b/vendor/github.com/lib/pq/TESTS.md
@@ -0,0 +1,33 @@
+# Tests
+
+## Running Tests
+
+`go test` is used for testing. A running PostgreSQL
+server is required, with the ability to log in. The
+database used for testing is "pqgotest" on "localhost",
+but these can be overridden using [environment
+variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
+
+Example:
+
+ PGHOST=/run/postgresql go test
+
+## Benchmarks
+
+A benchmark suite can be run as part of the tests:
+
+ go test -bench .
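+
+To run only the benchmarks with allocation statistics (an illustrative
+invocation using standard `go test` flags):
+
+ go test -run=^$ -bench=. -benchmem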
+
+## Example setup (Docker)
+
+Run a postgres container (the port must be published, and the official image
+needs an auth method or password to start):
+
+```
+docker run -d -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres
+```
+
+Run tests:
+
+```
+PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
+```
diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
new file mode 100644
index 000000000000..39c8f7e2e032
--- /dev/null
+++ b/vendor/github.com/lib/pq/array.go
@@ -0,0 +1,895 @@
+package pq
+
+import (
+ "bytes"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+var typeByteSlice = reflect.TypeOf([]byte{})
+var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+
+// Array returns the optimal driver.Valuer and sql.Scanner for an array or
+// slice of any dimension.
+//
+// For example:
+// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
+//
+// var x []sql.NullInt64
+// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
+//
+// Scanning multi-dimensional arrays is not supported. Arrays where the lower
+// bound is not one (such as `[0:0]={1}`) are not supported.
+func Array(a interface{}) interface {
+ driver.Valuer
+ sql.Scanner
+} {
+ switch a := a.(type) {
+ case []bool:
+ return (*BoolArray)(&a)
+ case []float64:
+ return (*Float64Array)(&a)
+ case []float32:
+ return (*Float32Array)(&a)
+ case []int64:
+ return (*Int64Array)(&a)
+ case []int32:
+ return (*Int32Array)(&a)
+ case []string:
+ return (*StringArray)(&a)
+ case [][]byte:
+ return (*ByteaArray)(&a)
+
+ case *[]bool:
+ return (*BoolArray)(a)
+ case *[]float64:
+ return (*Float64Array)(a)
+ case *[]float32:
+ return (*Float32Array)(a)
+ case *[]int64:
+ return (*Int64Array)(a)
+ case *[]int32:
+ return (*Int32Array)(a)
+ case *[]string:
+ return (*StringArray)(a)
+ case *[][]byte:
+ return (*ByteaArray)(a)
+ }
+
+ return GenericArray{a}
+}
+
+// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
+// to override the array delimiter used by GenericArray.
+type ArrayDelimiter interface {
+ // ArrayDelimiter returns the delimiter character(s) for this element's type.
+ ArrayDelimiter() string
+}
+
+// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
+type BoolArray []bool
+
+// Scan implements the sql.Scanner interface.
+func (a *BoolArray) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ return a.scanBytes(src)
+ case string:
+ return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
+ }
+
+ return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
+}
+
+func (a *BoolArray) scanBytes(src []byte) error {
+ elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
+ if err != nil {
+ return err
+ }
+ if *a != nil && len(elems) == 0 {
+ *a = (*a)[:0]
+ } else {
+ b := make(BoolArray, len(elems))
+ for i, v := range elems {
+ if len(v) != 1 {
+ return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+ }
+ switch v[0] {
+ case 't':
+ b[i] = true
+ case 'f':
+ b[i] = false
+ default:
+ return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+ }
+ }
+ *a = b
+ }
+ return nil
+}
+
+// Value implements the driver.Valuer interface.
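+//
+// For illustration (assumes a table t with a boolean[] column named flags):
+//
+//	db.Exec("INSERT INTO t (flags) VALUES ($1)", pq.Array([]bool{true, false}))
+//	// the array is encoded as the PostgreSQL literal {t,f}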
+func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
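+ // (the capacity below is only a lower-bound hint; strconv.AppendFloat
+ // grows the buffer as needed)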
+ b := make([]byte, 1, 1+2*n)
+ b[0] = '{'
+
+ b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
+ for i := 1; i < n; i++ {
+ b = append(b, ',')
+ b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
+ }
+
+ return string(append(b, '}')), nil
+ }
+
+ return "{}", nil
+}
+
+// Float32Array represents a one-dimensional array of the PostgreSQL real
+// (single precision) type.
+type Float32Array []float32
+
+// Scan implements the sql.Scanner interface.
+func (a *Float32Array) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ return a.scanBytes(src)
+ case string:
+ return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
+ }
+
+ return fmt.Errorf("pq: cannot convert %T to Float32Array", src)
+}
+
+func (a *Float32Array) scanBytes(src []byte) error {
+ elems, err := scanLinearArray(src, []byte{','}, "Float32Array")
+ if err != nil {
+ return err
+ }
+ if *a != nil && len(elems) == 0 {
+ *a = (*a)[:0]
+ } else {
+ b := make(Float32Array, len(elems))
+ for i, v := range elems {
+ var x float64
+ if x, err = strconv.ParseFloat(string(v), 32); err != nil {
+ return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
+ }
+ b[i] = float32(x)
+ }
+ *a = b
+ }
+ return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a Float32Array) Value() (driver.Value, error) {
+ if a == nil {
+ return nil, nil
+ }
+
+ if n := len(a); n > 0 {
+ // There will be at least two curly brackets, N bytes of values,
+ // and N-1 bytes of delimiters.
+ b := make([]byte, 1, 1+2*n)
+ b[0] = '{'
+
+ b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32)
+ for i := 1; i < n; i++ {
+ b = append(b, ',')
+ b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32)
+ }
+
+ return string(append(b, '}')), nil
+ }
+
+ return "{}", nil
+}
+
+// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
+// an array or slice of any dimension.
+type GenericArray struct{ A interface{} }
+
+func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
+ var assign func([]byte, reflect.Value) error
+ var del = ","
+
+ // TODO calculate the assign function for other types
+ // TODO repeat this section on the element type of arrays or slices (multidimensional)
+ {
+ if reflect.PtrTo(rt).Implements(typeSQLScanner) {
+ // dest is always addressable because it is an element of a slice.
+ assign = func(src []byte, dest reflect.Value) (err error) {
+ ss := dest.Addr().Interface().(sql.Scanner)
+ if src == nil {
+ err = ss.Scan(nil)
+ } else {
+ err = ss.Scan(src)
+ }
+ return
+ }
+ goto FoundType
+ }
+
+ assign = func([]byte, reflect.Value) error {
+ return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
+ }
+ }
+
+FoundType:
+
+ if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
+ del = ad.ArrayDelimiter()
+ }
+
+ return rt, assign, del
+}
+
+// Scan implements the sql.Scanner interface.
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
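+//
+// A short sketch (illustrative only; assumes a table t with a bigint[]
+// column named ids):
+//
+//	var ids pq.Int64Array
+//	err := db.QueryRow("SELECT ids FROM t WHERE id = $1", 1).Scan(&ids)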
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Int32Array represents a one-dimensional array of the PostgreSQL integer types. +type Int32Array []int32 + +// Scan implements the sql.Scanner interface. +func (a *Int32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int32Array", src) +} + +func (a *Int32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int32Array, len(elems)) + for i, v := range elems { + x, err := strconv.ParseInt(string(v), 10, 32) + if err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = int32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, int64(a[0]), 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, int64(a[i]), 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. 
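+//
+// For illustration (assumes a table article with a text[] column named tags):
+//
+//	var tags pq.StringArray
+//	err := db.QueryRow("SELECT tags FROM article WHERE id = $1", 7).Scan(&tags)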
+func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) 
+ } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 000000000000..4b0a0a8f7e9d --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit 
integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 000000000000..da4ff9de6033 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,2112 @@ +package pq + +import ( + "bufio" + "bytes" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyUnknownOwnership = errors.New("pq: Could not get owner information for private key, may not be properly protected") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less") + + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Compile time validation that our types implement the expected interfaces +var ( + _ driver.Driver = Driver{} +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. 
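+//
+// For example (the connection parameters are placeholders):
+//
+//	db, err := sql.Open("postgres", "host=localhost dbname=pqgotest sslmode=disable")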
+func (d Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout( + network, address string, timeout time.Duration, +) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If an error is set, this connection is bad and all public-facing + // functions should return the appropriate error by calling get() + // (ErrBadConn) or getForNext(). + err syncErr + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +type syncErr struct { + err error + sync.Mutex +} + +// Return ErrBadConn if connection is bad. +func (e *syncErr) get() error { + e.Lock() + defer e.Unlock() + if e.err != nil { + return driver.ErrBadConn + } + return nil +} + +// Return the error set on the connection. Currently only used by rows.Next. 
+func (e *syncErr) getForNext() error { + e.Lock() + defer e.Unlock() + return e.err +} + +// Set error, only if it isn't set yet. +func (e *syncErr) set(err error) { + if err == nil { + panic("attempt to set nil err") + } + e.Lock() + defer e.Unlock() + if e.err == nil { + e.err = err + } +} + +// Handle driver-side settings in parsed connection string. +func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + // From: https://github.com/tg/pgpass/blob/master/reader.go + for scanner.Scan() { + if scanText(scanner.Text(), o) { + break + } + } +} + +// GetFields is a helper function for scanText. +func getFields(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) +} + +// ScanText assists HandlePgpass in it's objective. +func scanText(line string, o values) bool { + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + if len(line) == 0 || line[0] == '#' { + return false + } + split := getFields(line) + if len(split) != 5 { + return false + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return true + } + return false +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. 
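+//
+// A sketch, assuming myDialer is a caller-supplied type implementing Dialer
+// (it is not defined in this package):
+//
+//	cn, err := pq.DialOpen(myDialer{}, "host=db.internal dbname=app")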
+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.Dialer(d) + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + // Create a new values map (copy). This makes it so maps in different + // connections do not reference the same underlying data structure, so it + // is safe for multiple connections to concurrently write to their opts. + o := make(values) + for k, v := range c.opts { + o[k] = v + } + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. 
+func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.err.set(driver.ErrBadConn) + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. 
Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. + if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "COMMIT" { + cn.err.set(driver.ErrBadConn) + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' { + res.result, res.tag = cn.parseComplete(r.string()) + if res.colNames != nil { + return + } + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
+ default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats( + colTyps []fieldDesc, forceText bool, +) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
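+	// ('X' is the protocol's Terminate message; sendSimpleMessage writes the
+	// five fixed bytes {'X', 0, 0, 0, 4}, a type byte plus an int32 length
+	// that counts the length field itself.)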
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +type safeRetryError struct { + Err error +} + +func (se *safeRetryError) Error() string { + return se.Err.Error() +} + +func (cn *conn) send(m *writeBuf) { + n, err := cn.c.Write(m.wrap()) + if err != nil { + if n == 0 { + err = &safeRetryError{Err: err} + } + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
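+//
+// Each backend message is framed as a single type byte followed by a
+// big-endian int32 length that includes the length field itself, which is
+// why the payload read below is length-4 bytes.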
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline", "sslsni": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + case "krbsrvname": + return true + case "krbspn": + return true + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. 
Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
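+
+	// Code 10 is AuthenticationSASL; the server advertises SCRAM-SHA-256.
+	// The exchange below then expects AuthenticationSASLContinue (11) and
+	// AuthenticationSASLFinal (12) in reply to the two client messages.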
+ + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if err := st.cn.err.get(); err != nil { + return err + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.err.set(driver.ErrBadConn) + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.err.set(driver.ErrBadConn) + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + return st.query(v) +} + +func (st *stmt) query(v []driver.Value) (r *rows, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// 
parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. + if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if err := conn.err.getForNext(); err != nil { + return err + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.err.set(driver.ErrBadConn) + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a +// byte buffer. +func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + buffer.WriteRune('"') + buffer.WriteString(strings.Replace(name, `"`, `""`, -1)) + buffer.WriteRune('"') +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. 
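+//
+// For instance, QuoteLiteral("O'Reilly") returns 'O''Reilly', and an input
+// containing a backslash, such as C:\tmp, returns the C-style escaped form
+//  E'C:\\tmp' (note the leading space before the E).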
+func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + 
cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() ( + paramTyps []oid.Oid, + colNames []string, + colTyps []fieldDesc, +) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. 
+ for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse( + protocolState string, +) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
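+		//
+		// For example, an environment containing "PGHOST=db.example.com" and
+		// "PGUSER=app" (placeholder values) yields {"host": "db.example.com",
+		// "user": "app"}.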
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGSSLSNI": + accrue("sslsni") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} + +// The database/sql/driver package says: +// All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator. +var _ driver.Pinger = &conn{} +var _ driver.SessionResetter = &conn{} + +func (cn *conn) ResetSession(ctx context.Context) error { + // Ensure bad connections are reported: From database/sql/driver: + // If a connection is never returned to the connection pool but immediately reused, then + // ResetSession is called prior to reuse but IsValid is not called. 
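+	// Returning a non-nil error here (driver.ErrBadConn once an error has
+	// been recorded on this conn) causes database/sql to discard the
+	// connection rather than reuse it.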
+ return cn.err.get() +} + +func (cn *conn) IsValid() bool { + return cn.err.get() == nil +} diff --git a/vendor/github.com/lib/pq/conn_go115.go b/vendor/github.com/lib/pq/conn_go115.go new file mode 100644 index 000000000000..f4ef030f9902 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go115.go @@ -0,0 +1,8 @@ +//go:build go1.15 +// +build go1.15 + +package pq + +import "database/sql/driver" + +var _ driver.Validator = &conn{} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 000000000000..63d4ca6aaa05 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,247 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +const ( + watchCancelDialContextTimeout = time.Second * 10 +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnPrepareContext" interface +func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + return cn.Prepare(query) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}, 1) + go func() { + select { + case <-done: + select { + case finished <- struct{}{}: + default: + // We raced with the finish func, let the next query handle this with the + // context. + return + } + + // Set the connection state to bad so it does not get reused. 
+ cn.err.set(ctx.Err()) + + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = cn.cancel(ctxCancel) + case <-finished: + } + }() + return func() { + select { + case <-finished: + cn.err.set(ctx.Err()) + cn.Close() + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + // Create a new values map (copy). This makes sure the connection created + // in this method cannot write to the same underlying data, which could + // cause a concurrent map write panic. This is necessary because cancel + // is called from a goroutine in watchCancel. + o := make(values) + for k, v := range cn.opts { + o[k] = v + } + + c, err := dial(ctx, cn.dialer, o) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(o) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} + +// Implement the "StmtQueryContext" interface +func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := st.watchCancel(ctx) + r, err := st.query(list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "StmtExecContext" interface +func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := st.watchCancel(ctx); finish != nil { + defer finish() + } + + return st.Exec(list) +} + +// watchCancel is implemented on stmt in order to not mark the parent conn as bad +func (st *stmt) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = st.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (st *stmt) cancel(ctx context.Context) error { + return st.cn.cancel(ctx) +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 000000000000..1145e12257ad --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,120 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. 
Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Dialer allows change the dialer used to open connections. +func (c *Connector) Dialer(dialer Dialer) { + c.dialer = dialer +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. 
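+	// For example, with no user= in the DSN and PGUSER unset, a process
+	// running as operating system user "alice" connects as "alice".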
+ if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 000000000000..a8f16b2b26b4 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,348 @@ +package pq + +import ( + "bytes" + "context" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + buffer := bytes.NewBufferString("COPY ") + BufferQuoteIdentifier(table, buffer) + buffer.WriteString(" (") + makeStmt(buffer, columns...) + return buffer.String() +} + +// MakeStmt makes the stmt string for CopyIn and CopyInSchema. +func makeStmt(buffer *bytes.Buffer, columns ...string) { + //s := bytes.NewBufferString() + for i, col := range columns { + if i != 0 { + buffer.WriteString(", ") + } + BufferQuoteIdentifier(col, buffer) + } + buffer.WriteString(") FROM STDIN") +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + buffer := bytes.NewBufferString("COPY ") + BufferQuoteIdentifier(schema, buffer) + buffer.WriteRune('.') + BufferQuoteIdentifier(table, buffer) + buffer.WriteString(" (") + makeStmt(buffer, columns...) 
+ return buffer.String() +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + mu struct { + sync.Mutex + err error + driver.Result + } +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad(driver.ErrBadConn) + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad(driver.ErrBadConn) + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad(driver.ErrBadConn) + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad(driver.ErrBadConn) + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) + case 'N': + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad(driver.ErrBadConn) + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad(err error) { + ci.cn.err.set(err) +} + +func (ci *copyin) getBad() error { + return ci.cn.err.get() +} + +func (ci *copyin) err() error { + ci.mu.Lock() + err := ci.mu.err + ci.mu.Unlock() + return err +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. 
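+// Because resploop runs concurrently with Exec and Close, several errors can
+// race to be reported; keeping only the first preserves the root cause.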
+func (ci *copyin) setError(err error) { + ci.mu.Lock() + if ci.mu.err == nil { + ci.mu.err = err + } + ci.mu.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.mu.Lock() + ci.mu.Result = result + ci.mu.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.mu.Lock() + result := ci.mu.Result + ci.mu.Unlock() + if result == nil { + return driver.RowsAffected(0) + } + return result +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + if len(v) == 0 { + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +// CopyData inserts a raw string into the COPY stream. The insert is +// asynchronous and CopyData can return errors from previous CopyData calls to +// the same COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if finish := ci.cn.watchCancel(ctx); finish != nil { + defer finish() + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + ci.buffer = append(ci.buffer, []byte(line)...) + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if err := ci.getBad(); err != nil { + return err + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. 
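+	// 'c' is the CopyDone message of the v3 frontend/backend protocol; it
+	// tells the server that the COPY data stream is complete.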
+ err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if err := ci.err(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 000000000000..b57184801ba9 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,268 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. 
It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. 
+ +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
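+
+For example, a connection string combining these parameters might look like
+the following (the host, user, and dbname values are placeholders, not
+defaults):
+
+	connStr := "host=db.example.com user=app dbname=app krbsrvname=postgres"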
+*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 000000000000..bffe6096aff6 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,632 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. 
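+		// For example, parsing "0.1" from a float4 column at 32-bit
+		// precision and then widening would yield 0.10000000149011612.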
+ f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // Check for a minute and second offset in the timezone. + if typ == oid.T_timestamptz || typ == oid.T_timetz { + for i := 3; i <= 6; i += 3 { + if str[len(str)-i] == ':' { + f += ":00" + continue + } + break + } + } + + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. 
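+			// For example, "24:00:00+03" becomes "00:00:00+03" here; the
+			// missing day is added back after parsing below.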
+ str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + if is2400Time { + t = t.Add(24 * time.Hour) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. 
+func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. +func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+Z ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' or 'Z' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } else if tzStart < len(str) && str[tzStart] == 'Z' { + // time zone Z separator indicates UTC is +00 + remainderIdx += 1 + } + + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset %= 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
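+// For example, hex input `\x6162` and escape input `ab` both decode to
+// []byte("ab").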
+func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseUint(string(s[1:4]), 8, 8) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 000000000000..f67c5a5fa641 --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,523 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. 
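+//
+// For example, ErrorCode("23505").Name() returns "unique_violation".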
+func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": 
"nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": 
"srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": 
"fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// SQLState returns the SQLState of the error. +func (err *Error) SQLState() string { + return string(err.Code) +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err *Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. 
+func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.err.set(driver.ErrBadConn) + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.err.set(driver.ErrBadConn) + *err = v + case *safeRetryError: + cn.err.set(driver.ErrBadConn) + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.err.set(driver.ErrBadConn) + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. + if *err == driver.ErrBadConn { + cn.err.set(driver.ErrBadConn) + } +} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 000000000000..408ec01f9779 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 000000000000..70ad122a7d6b --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,72 @@ +//go:build go1.10 +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 000000000000..5c421fdb8b56 --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,858 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. 
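The notice-handler connector above is the supported way to surface RAISE NOTICE output through database/sql. A sketch of the intended wiring, assuming pq.NewConnector from this package's connector.go; the connection string is a placeholder:

```go
package pqexample

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

// openWithNotices routes server NOTICE messages into the application log.
func openWithNotices() (*sql.DB, error) {
	base, err := pq.NewConnector("postgres://app:secret@localhost/appdb?sslmode=disable")
	if err != nil {
		return nil, err
	}
	// Every connection the pool opens gets the handler; it runs
	// synchronously while the triggering statement is being processed.
	connector := pq.ConnectorWithNoticeHandler(base, func(n *pq.Error) {
		log.Printf("server notice: %s: %s", n.Severity, n.Message)
	})
	return sql.OpenDB(connector), nil
}
```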
+func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
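The notification connector mirrors the notice one, but the handler only fires for NOTIFY traffic on connections that have executed LISTEN. A hedged sketch (DSN and channel name are placeholders); note that with a pooled *sql.DB the connection that issued LISTEN generally has to stay pinned, and for most applications the Listener type defined below is the better fit:

```go
package pqexample

import (
	"context"
	"database/sql"
	"log"

	"github.com/lib/pq"
)

// listenJobs pins one pooled connection for LISTEN; the handler then fires
// for notifications arriving on that connection.
func listenJobs(ctx context.Context) (*sql.DB, *sql.Conn, error) {
	base, err := pq.NewConnector("postgres://app:secret@localhost/appdb?sslmode=disable")
	if err != nil {
		return nil, nil, err
	}
	db := sql.OpenDB(pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
		log.Printf("NOTIFY from pid %d on %q: %s", n.BePid, n.Channel, n.Extra)
	}))
	// LISTEN is per-connection state, so keep this connection checked out.
	conn, err := db.Conn(ctx)
	if err != nil {
		db.Close()
		return nil, nil, err
	}
	if _, err := conn.ExecContext(ctx, "LISTEN jobs"); err != nil {
		conn.Close()
		db.Close()
		return nil, nil, err
	}
	return db, conn, nil
}
```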
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'S': + // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. 
+ close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. + b := &writeBuf{ + buf: []byte("Q\x00\x00\x00\x00"), + pos: 1, + } + b.string(q) + l.cn.send(b) + + return nil +} + +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: +// 1) "executed" is true; the query was executed to completion on the +// database server. If the query failed, err will be set to the error +// returned by the database, otherwise err will be nil. +// 2) If "executed" is false, the query could not be executed on the remote +// server. err will be non-nil. +// +// After a call to ExecSimpleQuery has returned an executed=false value, the +// connection has either been closed or will be closed shortly thereafter, and +// all subsequently executed queries will return an error. +func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { + if err = l.acquireSenderLock(); err != nil { + return false, err + } + defer l.releaseSenderLock() + + err = l.sendSimpleQuery(q) + if err != nil { + // We can't know what state the protocol is in, so we need to abandon + // this connection. + l.connectionLock.Lock() + // Set the error pointer if it hasn't been set already; see + // listenerConnMain. + if l.err == nil { + l.err = err + } + l.connectionLock.Unlock() + l.cn.c.Close() + return false, err + } + + // now we just wait for a reply.. + for { + m, ok := <-l.replyChan + if !ok { + // We lost the connection to the server; don't bother waiting for a + // response. err should have been set already.
+ l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +// Close closes the connection. +func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. +func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +// ListenerEventType is an enumeration of listener event types. +type ListenerEventType int + +const ( + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. + ListenerEventConnected ListenerEventType = iota + + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. 
+ Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + dialer Dialer + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. +func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + dialer: d, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. +func (l *Listener) NotificationChannel() <-chan *Notification { + return l.Notify +} + +// Listen starts listening for notifications on a channel. Calls to this +// function will block until an acknowledgement has been received from the +// server. Note that Listener automatically re-establishes the connection +// after connection loss, so this function may block indefinitely if the +// connection can not be re-established. +// +// Listen will only fail in three conditions: +// 1) The channel is already open. The returned error will be +// ErrChannelAlreadyOpen. +// 2) The query was executed on the remote server, but PostgreSQL returned an +// error message in response to the query. The returned error will be a +// pq.Error containing the information the server supplied. +// 3) Close is called on the Listener before the request could be completed. +// +// The channel name is case-sensitive. 
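Because Listen blocks until the server acknowledges (or the Listener is closed), typical usage pairs NewListener with a receive loop over Notify, as in the sketch below; the DSN, channel name, and intervals are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	dsn := "postgres://app:secret@localhost/appdb?sslmode=disable" // placeholder
	listener := pq.NewListener(dsn, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Printf("listener event %v: %v", ev, err)
			}
		})
	defer listener.Close()

	if err := listener.Listen("jobs"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case n := <-listener.Notify:
			if n == nil {
				// nil marks a re-established connection; notifications
				// sent while disconnected may have been missed.
				continue
			}
			log.Printf("notification on %q: %s", n.Channel, n.Extra)
		case <-time.After(90 * time.Second):
			// Periodic liveness check while idle.
			go func() {
				if err := listener.Ping(); err != nil {
					log.Printf("ping failed: %v", err)
				}
			}()
		}
	}
}
```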
+func (l *Listener) Listen(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // The server allows you to issue a LISTEN on a channel which is already + // open, but it seems useful to be able to detect this case to spot + // mistakes in application logic. If the application genuinely doesn't + // care, it can check the exported error and ignore it. + _, exists := l.channels[channel] + if exists { + return ErrChannelAlreadyOpen + } + + if l.cn != nil { + // If gotResponse is true but error is set, the query was executed on + // the remote server, but resulted in an error. This should be + // relatively rare, so it's fine if we just pass the error to our + // caller. However, if gotResponse is false, we could not complete the + // query on the remote server and our underlying connection is about + // to go away, so we only add relname to l.channels, and wait for + // resync() to take care of the rest. + gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. +// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection.
+func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 000000000000..caaede2489df --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 000000000000..ecc84c2c862d --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. 
+ +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 
3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + 
T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 000000000000..c6aa5b9a36a5 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName returns the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type, ok +// should return false.
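These fieldDesc accessors are not called directly by applications; they surface through database/sql's ColumnType API. A sketch of what they yield, under an illustrative query:

```go
package pqexample

import (
	"database/sql"
	"log"
)

// describeColumns shows how the fieldDesc helpers appear to callers.
func describeColumns(db *sql.DB) error {
	rows, err := db.Query(`SELECT 'x'::varchar(32) AS tag, 1.50::numeric(10,2) AS price`)
	if err != nil {
		return err
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		return err
	}
	for _, ct := range cols {
		// DatabaseTypeName comes from oid.TypeName, e.g. "VARCHAR", "NUMERIC".
		log.Printf("%s: %s, scans as %s", ct.Name(), ct.DatabaseTypeName(), ct.ScanType())
		if length, ok := ct.Length(); ok {
			log.Printf("  length: %d", length) // 32 for varchar(32)
		}
		if precision, scale, ok := ct.DecimalSize(); ok {
			log.Printf("  precision/scale: %d/%d", precision, scale) // 10/2
		}
	}
	return rows.Err()
}
```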
+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 000000000000..477216b6008a --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. 
+func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. +func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), 
fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 000000000000..36b61ba45be2 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,204 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" + "strings" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + // Set Server Name Indication (SNI), if enabled by connection parameters. + // By default SNI is on, any value which is not starting with "1" disables + // SNI -- that is the same check vanilla libpq uses. 
+ if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { + // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 + // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so + // just always set ServerName here and let crypto/tls do the filtering. + tlsConf.ServerName = o["host"] + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. + tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + sslinline := o["sslinline"] + if sslinline == "true" { + cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) + if err != nil { + return err + } + tlsConf.Certificates = []tls.Certificate{cert} + return nil + } + + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. 
+ // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + sslinline := o["sslinline"] + + var cert []byte + if sslinline == "true" { + cert = []byte(sslrootcert) + } else { + var err error + cert, err = ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 000000000000..d587f102edb2 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,93 @@ +//go:build !windows +// +build !windows + +package pq + +import ( + "errors" + "os" + "syscall" +) + +const ( + rootUserID = uint32(0) + + // The maximum permissions that a private key file owned by a regular user + // is allowed to have. This translates to u=rw. + maxUserOwnedKeyPermissions os.FileMode = 0600 + + // The maximum permissions that a private key file owned by root is allowed + // to have. This translates to u=rw,g=r. + maxRootOwnedKeyPermissions os.FileMode = 0640 +) + +var ( + errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") + errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") +) + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + + err = hasCorrectPermissions(info) + + // return ErrSSLKeyHasWorldPermissions for backwards compatibility with + // existing code. + if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { + err = ErrSSLKeyHasWorldPermissions + } + return err +} + +// hasCorrectPermissions checks the file info (and the unix-specific stat_t +// output) to verify that the permissions on the file are correct. +// +// If the file is owned by the same user the process is running as, +// the file should only have 0600 (u=rw). If the file is owned by root, +// and the group matches the group that the process is running in, the +// permissions cannot be more than 0640 (u=rw,g=r). The file should +// never have world permissions. +// +// Returns an error when the permission check fails.
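Taken together, the sslmode switch, certificate loading, and key-permission checks above are all driven from connection-string settings. A hedged sketch of a fully verified configuration; the host and file paths are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// sslmode=verify-full checks both the certificate chain (against
	// sslrootcert) and the host name. The client key must be 0600, or 0640
	// if owned by root, or the connection fails with
	// ErrSSLKeyHasWorldPermissions.
	dsn := "host=db.internal.example user=app dbname=app" +
		" sslmode=verify-full" +
		" sslrootcert=/etc/ssl/pg/root.crt" +
		" sslcert=/etc/ssl/pg/client.crt" +
		" sslkey=/etc/ssl/pg/client.key"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```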
+func hasCorrectPermissions(info os.FileInfo) error { + // if file's permission matches 0600, allow access. + userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) + + // regardless of if we're running as root or not, 0600 is acceptable, + // so we return if we match the regular user permission mask. + if info.Mode().Perm()&userPermissionMask == 0 { + return nil + } + + // We need to pull the Unix file information to get the file's owner. + // If we can't access it, there's some sort of operating system level error + // and we should fail rather than attempting to use faulty information. + sysInfo := info.Sys() + if sysInfo == nil { + return ErrSSLKeyUnknownOwnership + } + + unixStat, ok := sysInfo.(*syscall.Stat_t) + if !ok { + return ErrSSLKeyUnknownOwnership + } + + // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what + // Postgres does. + if unixStat.Uid == rootUserID { + rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) + if info.Mode().Perm()&rootPermissionMask != 0 { + return errSSLKeyHasUnacceptableRootPermissions + } + return nil + } + + return errSSLKeyHasUnacceptableUserPermissions +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 000000000000..73663c8f157c --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,10 @@ +//go:build windows +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 000000000000..aec6e95be8b2 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_other.go b/vendor/github.com/lib/pq/user_other.go new file mode 100644 index 000000000000..3dae8f5572b3 --- /dev/null +++ b/vendor/github.com/lib/pq/user_other.go @@ -0,0 +1,10 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build js || android || hurd || zos +// +build js android hurd zos + +package pq + +func userCurrent() (string, error) { + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 000000000000..5f2d439bc426 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,25 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos +// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 000000000000..2b691267b977 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. 
+func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 000000000000..9a1b9e0748ed --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. +func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml new file mode 100644 index 000000000000..6a21813a3e30 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - go generate + - git diff --cached --exit-code + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 000000000000..91b5cef30ebd --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
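For context on the uuid.go hunk above: decodeUUIDBinary expands 16 raw bytes into the canonical 36-byte 8-4-4-4-12 text form by pre-placing the four dashes and hex-encoding each byte group around them. Below is a minimal standalone sketch of that same grouping; the formatUUID name and the sample bytes are illustrative, not part of the vendored package.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// formatUUID mirrors decodeUUIDBinary's layout: dashes at indexes
// 8, 13, 18 and 23, with five hex groups encoded around them.
func formatUUID(src [16]byte) string {
	dst := make([]byte, 36)
	dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
	hex.Encode(dst[0:], src[0:4])    // 8 hex digits
	hex.Encode(dst[9:], src[4:6])    // 4 hex digits
	hex.Encode(dst[14:], src[6:8])   // 4 hex digits
	hex.Encode(dst[19:], src[8:10])  // 4 hex digits
	hex.Encode(dst[24:], src[10:16]) // 12 hex digits
	return string(dst)
}

func main() {
	var src [16]byte
	for i := range src {
		src[i] = byte(i)
	}
	// Prints 00010203-0405-0607-0809-0a0b0c0d0e0f.
	fmt.Println(formatUUID(src))
}
```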
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md
new file mode 100644
index 000000000000..aa56ab96c2b9
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.md
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed display width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh
new file mode 100644
index 000000000000..012162b077c9
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -race -coverprofile=profile.out -covermode=atomic "$d"
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 000000000000..19f8e0449b75
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,257 @@
+package runewidth
+
+import (
+	"os"
+)
+
+//go:generate go run script/generate.go
+
+var (
+	// EastAsianWidth will be set true if the current locale is CJK
+	EastAsianWidth bool
+
+	// ZeroWidthJoiner is a flag that enables UTR #51 zero-width-joiner (ZWJ) handling
+	ZeroWidthJoiner bool
+
+	// DefaultCondition is the Condition for the current locale
+	DefaultCondition = &Condition{}
+)
+
+func init() {
+	handleEnv()
+}
+
+func handleEnv() {
+	env := os.Getenv("RUNEWIDTH_EASTASIAN")
+	if env == "" {
+		EastAsianWidth = IsEastAsian()
+	} else {
+		EastAsianWidth = env == "1"
+	}
+	// update DefaultCondition
+	DefaultCondition.EastAsianWidth = EastAsianWidth
+	DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner
+}
+
+type interval struct {
+	first rune
+	last  rune
+}
+
+type table []interval
+
+func inTables(r rune, ts ...table) bool {
+	for _, t := range ts {
+		if inTable(r, t) {
+			return true
+		}
+	}
+	return false
+}
+
+func inTable(r rune, t table) bool {
+	if r < t[0].first {
+		return false
+	}
+
+	bot := 0
+	top := len(t) - 1
+	for top >= bot {
+		mid := (bot + top) >> 1
+
+		switch {
+		case t[mid].last < r:
+			bot = mid + 1
+		case t[mid].first > r:
+			top = mid - 1
+		default:
+			return true
+		}
+	}
+
+	return false
+}
+
+var private = table{
+	{0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
+}
+
+var nonprint = table{
+	{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
+	{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+	{0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
+	{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
+}
+
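The width tables in this file (combining, doublewidth, ambiguous, and so on) are sorted, non-overlapping rune intervals, which is what lets inTable above answer membership queries with a binary search. A hedged standalone sketch of the same lookup follows; the contains name and the two-entry table are illustrative only, not the package's real data.

```go
package main

import "fmt"

type interval struct{ first, last rune }

// contains performs the same binary search over sorted,
// non-overlapping intervals that runewidth's inTable uses.
func contains(r rune, t []interval) bool {
	if len(t) == 0 || r < t[0].first {
		return false
	}
	bot, top := 0, len(t)-1
	for top >= bot {
		mid := (bot + top) >> 1
		switch {
		case t[mid].last < r:
			bot = mid + 1
		case t[mid].first > r:
			top = mid - 1
		default:
			return true
		}
	}
	return false
}

func main() {
	// Illustrative table only: Hangul Jamo and the main CJK block.
	t := []interval{{0x1100, 0x115F}, {0x4E00, 0x9FFF}}
	fmt.Println(contains('漢', t)) // true: U+6F22 falls in 0x4E00..0x9FFF
	fmt.Println(contains('A', t)) // false: below the first interval
}
```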
+// Condition holds the EastAsianWidth flag, indicating whether the current locale is CJK.
+type Condition struct {
+	EastAsianWidth  bool
+	ZeroWidthJoiner bool
+}
+
+// NewCondition returns a new instance of Condition initialized from the current locale.
+func NewCondition() *Condition {
+	return &Condition{
+		EastAsianWidth:  EastAsianWidth,
+		ZeroWidthJoiner: ZeroWidthJoiner,
+	}
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+	switch {
+	case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned):
+		return 0
+	case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth):
+		return 2
+	default:
+		return 1
+	}
+}
+
+func (c *Condition) stringWidth(s string) (width int) {
+	for _, r := range []rune(s) {
+		width += c.RuneWidth(r)
+	}
+	return width
+}
+
+func (c *Condition) stringWidthZeroJoiner(s string) (width int) {
+	r1, r2 := rune(0), rune(0)
+	for _, r := range []rune(s) {
+		if r == 0xFE0E || r == 0xFE0F {
+			continue
+		}
+		w := c.RuneWidth(r)
+		if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) {
+			if width < w {
+				width = w
+			}
+		} else {
+			width += w
+		}
+		r1, r2 = r2, r
+	}
+	return width
+}
+
+// StringWidth returns the string's width as displayed on screen
+func (c *Condition) StringWidth(s string) (width int) {
+	if c.ZeroWidthJoiner {
+		return c.stringWidthZeroJoiner(s)
+	}
+	return c.stringWidth(s)
+}
+
+// Truncate returns the string truncated to fit within w cells
+func (c *Condition) Truncate(s string, w int, tail string) string {
+	if c.StringWidth(s) <= w {
+		return s
+	}
+	r := []rune(s)
+	tw := c.StringWidth(tail)
+	w -= tw
+	width := 0
+	i := 0
+	for ; i < len(r); i++ {
+		cw := c.RuneWidth(r[i])
+		if width+cw > w {
+			break
+		}
+		width += cw
+	}
+	return string(r[0:i]) + tail
+}
+
+// Wrap returns the string wrapped at w cells
+func (c *Condition) Wrap(s string, w int) string {
+	width := 0
+	out := ""
+	for _, r := range []rune(s) {
+		cw := RuneWidth(r)
+		if r == '\n' {
+			out += string(r)
+			width = 0
+			continue
+		} else if width+cw > w {
+			out += "\n"
+			width = 0
+			out += string(r)
+			width += cw
+			continue
+		}
+		out += string(r)
+		width += cw
+	}
+	return out
+}
+
+// FillLeft returns the string left-padded with spaces to w cells
+func (c *Condition) FillLeft(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return string(b) + s
+	}
+	return s
+}
+
+// FillRight returns the string right-padded with spaces to w cells
+func (c *Condition) FillRight(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return s + string(b)
+	}
+	return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+	return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth reports whether r has ambiguous width.
+func IsAmbiguousWidth(r rune) bool {
+	return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth reports whether r has neutral width.
+func IsNeutralWidth(r rune) bool {
+	return inTable(r, neutral)
+}
+
+// StringWidth returns the string's width as displayed on screen
+func StringWidth(s string) (width int) {
+	return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns the string truncated to fit within w cells
+func Truncate(s string, w int, tail string) string {
+	return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns the string wrapped at w cells
+func Wrap(s string, w int) string {
+	return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns the string left-padded with spaces to w cells
+func FillLeft(s string, w int) string {
+	return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns the string right-padded with spaces to w cells
+func FillRight(s string, w int) string {
+	return DefaultCondition.FillRight(s, w)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 000000000000..7d99f6e52103
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000000..c5fdf40baa0e
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+// +build js
+// +build !appengine
+
+package runewidth
+
+func IsEastAsian() bool {
+	// TODO: Implement this for the web. Detect East Asian locales in a compatible way, and return true.
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000000..480ad748538f
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,82 @@
+// +build !windows
+// +build !js
+// +build !appengine
+
+package runewidth
+
+import (
+	"os"
+	"regexp"
+	"strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+	"utf-8":   6,
+	"utf8":    6,
+	"jis":     8,
+	"eucjp":   3,
+	"euckr":   2,
+	"euccn":   2,
+	"sjis":    2,
+	"cp932":   2,
+	"cp51932": 2,
+	"cp936":   2,
+	"cp949":   2,
+	"cp950":   2,
+	"big5":    2,
+	"gbk":     2,
+	"gb2312":  2,
+}
+
+func isEastAsian(locale string) bool {
+	charset := strings.ToLower(locale)
+	r := reLoc.FindStringSubmatch(locale)
+	if len(r) == 2 {
+		charset = strings.ToLower(r[1])
+	}
+
+	if strings.HasSuffix(charset, "@cjk_narrow") {
+		return false
+	}
+
+	for pos, b := range []byte(charset) {
+		if b == '@' {
+			charset = charset[:pos]
+			break
+		}
+	}
+	max := 1
+	if m, ok := mblenTable[charset]; ok {
+		max = m
+	}
+	if max > 1 && (charset[0] != 'u' ||
+		strings.HasPrefix(locale, "ja") ||
+		strings.HasPrefix(locale, "ko") ||
+		strings.HasPrefix(locale, "zh")) {
+		return true
+	}
+	return false
+}
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+	locale := os.Getenv("LC_ALL")
+	if locale == "" {
+		locale = os.Getenv("LC_CTYPE")
+	}
+	if locale == "" {
+		locale = os.Getenv("LANG")
+	}
+
+	// ignore C locale
+	if locale == "POSIX" || locale == "C" {
+		return false
+	}
+	if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go new file mode 100644 index 000000000000..b27d77d8911e --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -0,0 +1,437 @@ +// Code generated by script/generate.go. DO NOT EDIT. + +package runewidth + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, + {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, + {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, + {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, + {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, + {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, + {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, + {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, + {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, + {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, + {0x1E8D0, 0x1E8D6}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, + {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, + {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, + {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, + {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, + {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, + {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, + {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, + {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, + {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, + {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, + {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, + {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, + {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, + {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, + {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, + {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, + {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, + {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, + 
{0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6}, + {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} +var notassigned = table{ + {0x27E6, 0x27ED}, {0x2985, 
0x2986}, +} + +var neutral = table{ + {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, + {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, + {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, + {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, + {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, + {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, + {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, + {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, + {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, + {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, + {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, + {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, + {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, + {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, + {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, + {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, + {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, + {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, + {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, + {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, + {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, + {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, + {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, + {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, + {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, + {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, + {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, + {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, + {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, + {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, + {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, + {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, + {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, + {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, + {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, + {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, + {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, + {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, + {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, + {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, + {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, + {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, + {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 
0x0DB1}, + {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, + {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, + {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, + {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, + {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, + {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, + {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, + {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, + {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, + {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, + {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, + {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, + {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, + {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, + {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, + {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, + {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, + {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, + {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, + {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, + {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, + {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, + {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, + {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, + {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, + {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, + {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, + {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, + {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, + {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, + {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, + {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, + {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, + {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, + {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, + {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, + {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, + {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, + {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, + {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, + {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, + {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, + {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, + {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, + {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, + {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, + {0x2590, 
0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, + {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, + {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, + {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, + {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, + {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, + {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641}, + {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, + {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, + {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, + {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, + {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, + {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, + {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D}, + {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775}, + {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, + {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, + {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, + {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, + {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, + {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, + {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, + {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, + {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, + {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, + {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, + {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, + {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, + {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, + {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, + {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, + {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, + {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, + {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, + {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, + {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, + {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, + {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, + {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, + {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, + {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, 
{0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, + {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, + {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, + {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, + {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, + {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, + {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, + {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, + {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, + {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, + {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, + {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, + {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, + {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A}, + {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, + {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, + {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, + {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, + {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, + {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, + {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, + {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, + {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, + {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, + {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, + {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, + {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, + {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, + {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, + {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, + {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, + {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, + {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, + {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, + {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, + {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, + {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, + {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, + {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, + {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, + {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, + {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, + {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, + {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, 
{0x1E8C7, 0x1E8D6}, + {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, + {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, + {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, + {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, + {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, + {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, + {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, + {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, + {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, + {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, + {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, + {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, + {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, + {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, + {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, + {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, + {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, + {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, + {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, + {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, + {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, + {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, +} + +var emoji = table{ + {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, + {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, + {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, + {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, + {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, + {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, + {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, + {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, + {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, + {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, + {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, + {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, + {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, + {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, + {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, + {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, + {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, + {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F}, + {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, + {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, + {0x1FC00, 0x1FFFD}, +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go 
b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 000000000000..d6a61777d7b1 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,28 @@ +// +build windows +// +build !appengine + +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000000..229851590442 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000000..f0fbd2e5c98c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 000000000000..db6a6aa1a1f4 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. 
+	return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 000000000000..8089e6670a34
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,631 @@
+package copystructure
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mitchellh/reflectwalk"
+)
+
+const tagKey = "copy"
+
+// Copy returns a deep copy of v.
+//
+// Copy is unable to copy unexported fields in a struct (lowercase field names).
+// Unexported fields can't be reflected by the Go runtime and therefore
+// copystructure can't perform any data copies.
+//
+// For structs, copy behavior can be controlled with struct tags. For example:
+//
+//   struct {
+//     Name string
+//     Data *bytes.Buffer `copy:"shallow"`
+//   }
+//
+// The available tag values are:
+//
+// * "ignore" - The field will be ignored, effectively resulting in it being
+//   assigned the zero value in the copy.
+//
+// * "shallow" - The field will be shallow copied. This means that reference
+//   values such as pointers, maps, slices, etc. will be directly assigned
+//   versus deep copied.
+//
+func Copy(v interface{}) (interface{}, error) {
+	return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// ShallowCopiers is a map of pointer types that behave specially
+// when they are copied. If a type is found in this map while deep
+// copying, the pointer value will be shallow copied and not walked
+// into.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value
+// with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
+func Must(v interface{}, err error) interface{} {
+	if err != nil {
+		panic("copy error: " + err.Error())
+	}
+
+	return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+	// Lock any types that are a sync.Locker and are not a mutex while copying.
+	// If there is an RLocker method, use that to get the sync.Locker.
+	Lock bool
+
+	// Copiers is a map of types associated with a CopierFunc. Use the global
+	// Copiers map if this is nil.
+	Copiers map[reflect.Type]CopierFunc
+
+	// ShallowCopiers is a map of pointer types that are shallow copied,
+	// no matter where they are encountered. Use the
+	// global ShallowCopiers if this is nil.
+	ShallowCopiers map[reflect.Type]struct{}
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+		return nil, errPointerRequired
+	}
+
+	w := new(walker)
+	if c.Lock {
+		w.useLocks = true
+	}
+
+	if c.Copiers == nil {
+		c.Copiers = Copiers
+	}
+	w.copiers = c.Copiers
+
+	if c.ShallowCopiers == nil {
+		c.ShallowCopiers = ShallowCopiers
+	}
+	w.shallowCopiers = c.ShallowCopiers
+
+	err := reflectwalk.Walk(v, w)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the result. If the result is nil, then we want to turn it
+	// into a typed nil if we can.
+	result := w.Result
+	if result == nil {
+		val := reflect.ValueOf(v)
+		result = reflect.Indirect(reflect.New(val.Type())).Interface()
+	}
+
+	return result, nil
+}
+
+// Return the key used to index interface types we've seen. Store the number
+// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
+// easy to calculate, easy to match a key with our current depth, and we don't
+// need to deal with initializing and cleaning up nested maps or slices.
+func ifaceKey(pointers, depth int) uint64 {
+	return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+	Result interface{}
+
+	copiers        map[reflect.Type]CopierFunc
+	shallowCopiers map[reflect.Type]struct{}
+	depth          int
+	ignoreDepth    int
+	vals           []reflect.Value
+	cs             []reflect.Value
+
+	// This stores the number of pointers we've walked over, indexed by depth.
+	ps []int
+
+	// If an interface is indirected by a pointer, we need to know the type of
+	// interface to create when creating the new value. Store the interface
+	// types here, indexed by both the walk depth and the number of pointers
+	// already seen at that depth. Use ifaceKey to calculate the proper uint64
+	// value.
+	ifaceTypes map[uint64]reflect.Type
+
+	// any locks we've taken, indexed by depth
+	locks []sync.Locker
+	// take locks while walking the structure
+	useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+	w.depth++
+
+	// ensure we have enough elements to index via w.depth
+	for w.depth >= len(w.locks) {
+		w.locks = append(w.locks, nil)
+	}
+
+	for len(w.ps) < w.depth+1 {
+		w.ps = append(w.ps, 0)
+	}
+
+	return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+	locker := w.locks[w.depth]
+	w.locks[w.depth] = nil
+	if locker != nil {
+		defer locker.Unlock()
+	}
+
+	// clear out pointers and interfaces as we exit the stack
+	w.ps[w.depth] = 0
+
+	for k := range w.ifaceTypes {
+		mask := uint64(^uint32(0))
+		if k&mask == uint64(w.depth) {
+			delete(w.ifaceTypes, k)
+		}
+	}
+
+	w.depth--
+	if w.ignoreDepth > w.depth {
+		w.ignoreDepth = 0
+	}
+
+	if w.ignoring() {
+		return nil
+	}
+
+	switch l {
+	case reflectwalk.Array:
+		fallthrough
+	case reflectwalk.Map:
+		fallthrough
+	case reflectwalk.Slice:
+		w.replacePointerMaybe()
+
+		// Pop map off our container
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		// Pop off the key and value
+		mv := w.valPop()
+		mk := w.valPop()
+		m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+		// or in this case never adds it. We need to create a properly typed
+		// zero value so that this key can be set.
+ if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. 
+ w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. 
+ if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 000000000000..4f2ee4d97338 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000000..f9c841a51e0d --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000000..ac82cd2e159f --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000000..6a7f176117f9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000000..70760cf4c705 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
+ +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000000..7fee7b050ba4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. 
This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = 
walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go index 2ec7706a16a3..579ce5530a7e 100644 --- a/vendor/github.com/moby/term/term_unix.go +++ b/vendor/github.com/moby/term/term_unix.go @@ -81,7 +81,7 @@ func setRawTerminal(fd uintptr) (*State, error) { return makeRaw(fd) } -func setRawTerminalOutput(fd uintptr) (*State, error) { +func setRawTerminalOutput(uintptr) (*State, error) { return nil, nil } diff --git a/vendor/github.com/rubenv/sql-migrate/.dockerignore b/vendor/github.com/rubenv/sql-migrate/.dockerignore new file mode 100644 index 000000000000..42bb921b466d --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.dockerignore @@ -0,0 +1,6 @@ +* +!go.mod +!go.sum +!*.go +!sql-migrate/*.go +!sqlparse/*.go diff --git a/vendor/github.com/rubenv/sql-migrate/.gitignore b/vendor/github.com/rubenv/sql-migrate/.gitignore new file mode 100644 index 000000000000..9720db0cfa16 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.gitignore @@ -0,0 +1,9 @@ +.*.swp +*.test +.idea +/vendor/ + +/sql-migrate/test.db +/test.db +.vscode/ +bin/ diff --git a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml new file mode 100644 index 000000000000..f5818416532f --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml @@ -0,0 +1,133 @@ +version: "2" +run: + tests: true +linters: + default: none + enable: + - asciicheck + - depguard + - errcheck + - errorlint + - exhaustive + - gocritic + - govet + - ineffassign + - nolintlint + - revive + - staticcheck + - unparam + - unused + - whitespace + settings: + depguard: + rules: + main: + allow: + - $gostd + - github.com/denisenkom/go-mssqldb + - github.com/go-sql-driver/mysql + - github.com/go-gorp/gorp/v3 + - github.com/lib/pq + - github.com/mattn/go-sqlite3 + - github.com/mitchellh/cli + - github.com/olekukonko/tablewriter + - github.com/rubenv/sql-migrate + - gopkg.in/check.v1 + - gopkg.in/yaml.v2 + exhaustive: + default-signifies-exhaustive: true + gocritic: + disabled-checks: + - ifElseChain + govet: + disable: + - fieldalignment + enable-all: true + nolintlint: + require-explanation: true + require-specific: true + allow-no-explanation: + - depguard + allow-unused: false + revive: + enable-all-rules: false + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: imports-blocklist + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: string-format + - name: string-of-int + - name: struct-tag + - name: time-naming + - name: 
unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: superfluous-else + - name: unreachable-code + - name: var-declaration + - name: waitgroup-by-value + - name: unused-receiver + - name: unnecessary-stmt + - name: unused-parameter + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: (.+)\.go$ + text: declaration of "err" shadows declaration at + - path: (.+)\.go$ + text: 'error-strings: error strings should not be capitalized or end with punctuation or a newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not end with punctuation or newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not be capitalized' + paths: + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 10000 + max-same-issues: 10000 +formatters: + enable: + - gofmt + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/rubenv/sql-migrate + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/rubenv/sql-migrate/Dockerfile b/vendor/github.com/rubenv/sql-migrate/Dockerfile new file mode 100644 index 000000000000..238ac714afb7 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/Dockerfile @@ -0,0 +1,25 @@ +ARG GO_VERSION=1.20.6 +ARG ALPINE_VERSION=3.12 + +### Vendor +FROM golang:${GO_VERSION} as vendor +COPY . /project +WORKDIR /project +RUN go mod tidy && go mod vendor + +### Build binary +FROM golang:${GO_VERSION} as build-binary +COPY . /project +COPY --from=vendor /project/vendor /project/vendor +WORKDIR /project +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 GO111MODULE=on go build \ + -v \ + -mod vendor \ + -o /project/bin/sql-migrate \ + /project/sql-migrate + +### Image +FROM alpine:${ALPINE_VERSION} as image +COPY --from=build-binary /project/bin/sql-migrate /usr/local/bin/sql-migrate +RUN chmod +x /usr/local/bin/sql-migrate +ENTRYPOINT ["sql-migrate"] diff --git a/vendor/github.com/rubenv/sql-migrate/LICENSE b/vendor/github.com/rubenv/sql-migrate/LICENSE new file mode 100644 index 000000000000..95e7c9abe7c3 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (C) 2014-2021 by Ruben Vermeersch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/rubenv/sql-migrate/Makefile b/vendor/github.com/rubenv/sql-migrate/Makefile
new file mode 100644
index 000000000000..e17ae0ad4a92
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/Makefile
@@ -0,0 +1,11 @@
+.PHONY: test lint build
+
+test:
+	go test ./...
+
+lint:
+	golangci-lint run --fix --config .golangci.yaml
+
+build:
+	mkdir -p bin
+	go build -o ./bin/sql-migrate ./sql-migrate
diff --git a/vendor/github.com/rubenv/sql-migrate/README.md b/vendor/github.com/rubenv/sql-migrate/README.md
new file mode 100644
index 000000000000..65fd7f70efe7
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/README.md
@@ -0,0 +1,386 @@
+# sql-migrate
+
+> SQL Schema migration tool for [Go](https://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
+
+[![Test](https://github.com/rubenv/sql-migrate/actions/workflows/test.yml/badge.svg)](https://github.com/rubenv/sql-migrate/actions/workflows/test.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/rubenv/sql-migrate.svg)](https://pkg.go.dev/github.com/rubenv/sql-migrate)
+
+## Features
+
+- Usable as a CLI tool or as a library
+- Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
+- Can embed migrations into your application
+- Migrations are defined with SQL for full flexibility
+- Atomic migrations
+- Up/down migrations to allow rollback
+- Supports multiple database types in one project
+- Works great with other libraries such as [sqlx](https://jmoiron.github.io/sqlx/)
+- Supported on go1.13+
+
+## Installation
+
+To install the library and command line program, use the following:
+
+```bash
+go get -v github.com/rubenv/sql-migrate/...
+```
+
+For Go 1.18 and later, use:
+
+```bash
+go install github.com/rubenv/sql-migrate/...@latest
+```
+
+## Usage
+
+### As a standalone tool
+
+```
+$ sql-migrate --help
+usage: sql-migrate [--version] [--help] <command> [<args>]
+
+Available commands are:
+    down      Undo a database migration
+    new       Create a new migration
+    redo      Reapply the last migration
+    status    Show migration status
+    up        Migrates the database to the most recent version available
+```
+
+Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
+
+```yml
+development:
+  dialect: sqlite3
+  datasource: test.db
+  dir: migrations/sqlite3
+
+production:
+  dialect: postgres
+  datasource: dbname=myapp sslmode=disable
+  dir: migrations/postgres
+  table: migrations
+```
+
+(See more examples for different setups [here](test-integration/dbconfig.yml))
+
+Environment variables in the `datasource` field are expanded via `os.ExpandEnv`, which is useful if you don't want to store credentials in the file (a short sketch of this expansion follows at the end of this section):
+
+```yml
+production:
+  dialect: postgres
+  datasource: host=prodhost dbname=proddb user=${DB_USER} password=${DB_PASSWORD} sslmode=require
+  dir: migrations
+  table: migrations
+```
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the `-env` flag (defaults to `development`).
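+
+For illustration, a minimal sketch of that expansion step using only the standard library's `os.ExpandEnv` (the `DB_USER` variable name is just an example, and sql-migrate performs the equivalent substitution for you):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	os.Setenv("DB_USER", "myapp")
+	// The same substitution described above for the datasource field:
+	fmt.Println(os.ExpandEnv("user=${DB_USER} sslmode=require"))
+	// Output: user=myapp sslmode=require
+}
+```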
+
+Use the `--help` flag in combination with any of the commands to get an overview of its usage:
+
+```
+$ sql-migrate up --help
+Usage: sql-migrate up [options] ...
+
+  Migrates the database to the most recent version available.
+
+Options:
+
+  -config=dbconfig.yml   Configuration file to use.
+  -env="development"     Environment.
+  -limit=0               Limit the number of migrations (0 = unlimited).
+  -version               Run migrate up to a specific version, e.g. the version number of migration 1_initial.sql is 1.
+  -dryrun                Don't apply migrations, just print them.
+```
+
+The `new` command creates a new empty migration template using the following pattern `<current time>-<name>.sql`.
+
+The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` and `-version` parameters. Note that `-version` takes precedence over `-limit` if you use them both.
+
+The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the `status` command to see the state of the applied migrations:
+
+```bash
+$ sql-migrate status
++---------------+-----------------------------------------+
+| MIGRATION     | APPLIED                                 |
++---------------+-----------------------------------------+
+| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+| 2_record.sql  | no                                      |
++---------------+-----------------------------------------+
+```
+
+#### Running Test Integrations
+
+You can see how to run different setups by executing the `.sh` files in [test-integration](test-integration/):
+
+```bash
+# Run the mysql-env.sh example (you need to be in the project root directory)
+
+./test-integration/mysql-env.sh
+```
+
+### MySQL Caveat
+
+If you are using MySQL, you must append `?parseTime=true` to the `datasource` configuration. For example:
+
+```yml
+production:
+  dialect: mysql
+  datasource: root@/dbname?parseTime=true
+  dir: migrations/mysql
+  table: migrations
+```
+
+See [here](https://github.com/go-sql-driver/mysql#parsetime) for more information.
+
+### Oracle (oci8)
+
+The Oracle driver is [oci8](https://github.com/mattn/go-oci8). It is not pure Go code and relies on the Oracle [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html); more detailed information is in the [oci8 repo](https://github.com/mattn/go-oci8).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+```bash
+go get -tags oracle -v github.com/rubenv/sql-migrate/...
+```
+
+```yml
+development:
+  dialect: oci8
+  datasource: user/password@localhost:1521/sid
+  dir: migrations/oracle
+  table: migrations
+```
+
+### Oracle (godror)
+
+The Oracle driver is [godror](https://github.com/godror/godror). It is not pure Go code and relies on the Oracle [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html); more detailed information is in the [godror repository](https://github.com/godror/godror).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+1. Install sql-migrate
+
+```bash
+go get -tags godror -v github.com/rubenv/sql-migrate/...
+```
+
+2. Download the Oracle Instant Client (the example below is for macOS; see [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html) for other systems)
+
+```bash
+wget https://download.oracle.com/otn_software/mac/instantclient/193000/instantclient-basic-macos.x64-19.3.0.0.0dbru.zip
+```
+
+3. Configure the `LD_LIBRARY_PATH` environment variable
+
+```
+export LD_LIBRARY_PATH=your_oracle_office_path/instantclient_19_3
+```
+
+```yml
+development:
+  dialect: godror
+  datasource: user/password@localhost:1521/sid
+  dir: migrations/oracle
+  table: migrations
+```
+
+### As a library
+
+Import sql-migrate into your application:
+
+```go
+import "github.com/rubenv/sql-migrate"
+```
+
+Set up a source of migrations; this can be from memory, from a set of files, from bindata (more on that later), or from any library that implements [`http.FileSystem`](https://godoc.org/net/http#FileSystem):
+
+```go
+// Hardcoded strings in memory:
+migrations := &migrate.MemoryMigrationSource{
+	Migrations: []*migrate.Migration{
+		&migrate.Migration{
+			Id:   "123",
+			Up:   []string{"CREATE TABLE people (id int)"},
+			Down: []string{"DROP TABLE people"},
+		},
+	},
+}
+
+// OR: Read migrations from a folder:
+migrations := &migrate.FileMigrationSource{
+	Dir: "db/migrations",
+}
+
+// OR: Use migrations from a packr box
+// Note: Packr is no longer supported; your best option these days is [embed](https://pkg.go.dev/embed)
+migrations := &migrate.PackrMigrationSource{
+	Box: packr.New("migrations", "./migrations"),
+}
+
+// OR: Use pkger, which implements `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+	FileSystem: pkger.Dir("/db/migrations"),
+}
+
+// OR: Use migrations from bindata:
+migrations := &migrate.AssetMigrationSource{
+	Asset:    Asset,
+	AssetDir: AssetDir,
+	Dir:      "migrations",
+}
+
+// OR: Read migrations from a `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+	FileSystem: httpFS,
+}
+```
+
+Then use the `Exec` function to upgrade your database:
+
+```go
+db, err := sql.Open("sqlite3", filename)
+if err != nil {
+	// Handle errors!
+}
+
+n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+if err != nil {
+	// Handle errors!
+}
+fmt.Printf("Applied %d migrations!\n", n)
+```
+
+Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
+
+## Writing migrations
+
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+
+```sql
+-- +migrate Up
+-- SQL in section 'Up' is executed when this migration is applied
+CREATE TABLE people (id int);
+
+
+-- +migrate Down
+-- SQL section 'Down' is executed when this migration is rolled back
+DROP TABLE people;
+```
+
+You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
+
+You can alternatively set up a separator string that matches an entire line by setting `sqlparse.LineSeparator`. This
+can be used to imitate, for example, MS SQL Query Analyzer functionality where commands can be separated by a line with
+contents of `GO`. If `sqlparse.LineSeparator` is matched, it will not be included in the resulting migration scripts.
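+
+A minimal sketch of that option, assuming only the package-level `sqlparse.LineSeparator` variable named above:
+
+```go
+import "github.com/rubenv/sql-migrate/sqlparse"
+
+func init() {
+	// Treat a line whose entire contents are "GO" as a statement
+	// separator; the separator line itself will not appear in the
+	// resulting migration scripts.
+	sqlparse.LineSeparator = "GO"
+}
+```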
+ +If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries: + +```sql +-- +migrate Up +CREATE TABLE people (id int); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION do_something() +returns void AS $$ +DECLARE + create_query text; +BEGIN + -- Do something here +END; +$$ +language plpgsql; +-- +migrate StatementEnd + +-- +migrate Down +DROP FUNCTION do_something(); +DROP TABLE people; +``` + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. + +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the `notransaction` option: + +```sql +-- +migrate Up notransaction +CREATE UNIQUE INDEX CONCURRENTLY people_unique_id_idx ON people (id); + +-- +migrate Down +DROP INDEX people_unique_id_idx; +``` + +## Embedding migrations with [embed](https://pkg.go.dev/embed) + +If you like your Go applications self-contained (that is: a single binary): use [embed](https://pkg.go.dev/embed) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Import the embed package into your application and point it to your migrations: + +```go +import "embed" + +//go:embed migrations/* +var dbMigrations embed.FS +``` + +Use the `EmbedFileSystemMigrationSource` in your application to find the migrations: + +```go +migrations := migrate.EmbedFileSystemMigrationSource{ + FileSystem: dbMigrations, + Root: "migrations", +} +``` + +Other options such as [packr](https://github.com/gobuffalo/packr) or [go-bindata](https://github.com/shuLhan/go-bindata) are no longer recommended. + +## Embedding migrations with libraries that implement `http.FileSystem` + +You can also embed migrations with any library that implements `http.FileSystem`, like [`vfsgen`](https://github.com/shurcooL/vfsgen), [`parcello`](https://github.com/phogolabs/parcello), or [`go-resources`](https://github.com/omeid/go-resources). + +```go +migrationSource := &migrate.HttpFileSystemMigrationSource{ + FileSystem: httpFS, +} +``` + +## Extending + +Adding a new migration source means implementing `MigrationSource`. + +```go +type MigrationSource interface { + FindMigrations() ([]*Migration, error) +} +``` + +The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field. + +## Usage with [sqlx](https://jmoiron.github.io/sqlx/) + +This library is compatible with sqlx. When calling migrate just dereference the DB from your `*sqlx.DB`: + +``` +n, err := migrate.Exec(db.DB, "sqlite3", migrations, migrate.Up) + // ^^^ <-- Here db is a *sqlx.DB, the db.DB field is the plain sql.DB +if err != nil { + // Handle errors! +} +``` + +## Questions or Feedback? + +You can use Github Issues for feedback or questions. + +## License + +This library is distributed under the [MIT](LICENSE) license. 
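+
+## Appendix: a custom MigrationSource sketch
+
+To make the Extending section above concrete, a minimal in-memory implementation might look like this (the `staticSource` type is invented for this sketch; in practice `MemoryMigrationSource` already covers this use case):
+
+```go
+type staticSource struct {
+	migrations []*migrate.Migration
+}
+
+// FindMigrations returns the wrapped migrations unchanged; per the
+// contract described above, the slice should already be sorted by Id.
+func (s staticSource) FindMigrations() ([]*migrate.Migration, error) {
+	return s.migrations, nil
+}
+```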
diff --git a/vendor/github.com/rubenv/sql-migrate/doc.go b/vendor/github.com/rubenv/sql-migrate/doc.go
new file mode 100644
index 000000000000..8ff186d0ff8f
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/doc.go
@@ -0,0 +1,238 @@
+/*
+SQL Schema migration tool for Go.
+
+Key features:
+
+	- Usable as a CLI tool or as a library
+	- Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
+	- Can embed migrations into your application
+	- Migrations are defined with SQL for full flexibility
+	- Atomic migrations
+	- Up/down migrations to allow rollback
+	- Supports multiple database types in one project
+
+# Installation
+
+To install the library and command line program, use the following:
+
+	go get -v github.com/rubenv/sql-migrate/...
+
+# Command-line tool
+
+The main command is called sql-migrate.
+
+	$ sql-migrate --help
+	usage: sql-migrate [--version] [--help] <command> [<args>]
+
+	Available commands are:
+	    down      Undo a database migration
+	    new       Create a new migration
+	    redo      Reapply the last migration
+	    status    Show migration status
+	    up        Migrates the database to the most recent version available
+
+Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
+
+	development:
+	    dialect: sqlite3
+	    datasource: test.db
+	    dir: migrations/sqlite3
+
+	production:
+	    dialect: postgres
+	    datasource: dbname=myapp sslmode=disable
+	    dir: migrations/postgres
+	    table: migrations
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the -env flag (defaults to development).
+
+Use the --help flag in combination with any of the commands to get an overview of its usage:
+
+	$ sql-migrate up --help
+	Usage: sql-migrate up [options] ...
+
+	  Migrates the database to the most recent version available.
+
+	  Options:
+
+	    -config=dbconfig.yml  Configuration file to use.
+	    -env="development"    Environment.
+	    -limit=0              Limit the number of migrations (0 = unlimited).
+	    -dryrun               Don't apply migrations, just print them.
+
+The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
+
+The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the status command to see the state of the applied migrations:
+
+	$ sql-migrate status
+	+---------------+-----------------------------------------+
+	|   MIGRATION   |                 APPLIED                 |
+	+---------------+-----------------------------------------+
+	| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+	| 2_record.sql  | no                                      |
+	+---------------+-----------------------------------------+
+
+# MySQL Caveat
+
+If you are using MySQL, you must append ?parseTime=true to the datasource configuration. For example:
+
+	production:
+	    dialect: mysql
+	    datasource: root@/dbname?parseTime=true
+	    dir: migrations/mysql
+	    table: migrations
+
+See https://github.com/go-sql-driver/mysql#parsetime for more information.
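+
+A minimal connection sketch under that caveat (the driver import and DSN are
+examples, not part of this package):
+
+	import (
+		"database/sql"
+
+		_ "github.com/go-sql-driver/mysql"
+	)
+
+	db, err := sql.Open("mysql", "root@/dbname?parseTime=true")
+	if err != nil {
+		// Handle errors!
+	}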
+
+# Library
+
+Import sql-migrate into your application:
+
+	import "github.com/rubenv/sql-migrate"
+
+Set up a source of migrations; this can be from memory, from a set of files or from bindata (more on that later):
+
+	// Hardcoded strings in memory:
+	migrations := &migrate.MemoryMigrationSource{
+		Migrations: []*migrate.Migration{
+			&migrate.Migration{
+				Id:   "123",
+				Up:   []string{"CREATE TABLE people (id int)"},
+				Down: []string{"DROP TABLE people"},
+			},
+		},
+	}
+
+	// OR: Read migrations from a folder:
+	migrations := &migrate.FileMigrationSource{
+		Dir: "db/migrations",
+	}
+
+	// OR: Use migrations from bindata:
+	migrations := &migrate.AssetMigrationSource{
+		Asset:    Asset,
+		AssetDir: AssetDir,
+		Dir:      "migrations",
+	}
+
+Then use the Exec function to upgrade your database:
+
+	db, err := sql.Open("sqlite3", filename)
+	if err != nil {
+		// Handle errors!
+	}
+
+	n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+	if err != nil {
+		// Handle errors!
+	}
+	fmt.Printf("Applied %d migrations!\n", n)
+
+Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+The full set of capabilities can be found in the API docs below.
+
+# Writing migrations
+
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+
+	-- +migrate Up
+	-- SQL in section 'Up' is executed when this migration is applied
+	CREATE TABLE people (id int);
+
+
+	-- +migrate Down
+	-- SQL section 'Down' is executed when this migration is rolled back
+	DROP TABLE people;
+
+You can put multiple statements in each block, as long as you end them with a semicolon (;).
+
+If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
+
+	-- +migrate Up
+	CREATE TABLE people (id int);
+
+	-- +migrate StatementBegin
+	CREATE OR REPLACE FUNCTION do_something()
+	returns void AS $$
+	DECLARE
+	  create_query text;
+	BEGIN
+	  -- Do something here
+	END;
+	$$
+	language plpgsql;
+	-- +migrate StatementEnd
+
+	-- +migrate Down
+	DROP FUNCTION do_something();
+	DROP TABLE people;
+
+The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
+
+Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However, some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the notransaction option:
+
+	-- +migrate Up notransaction
+	CREATE UNIQUE INDEX CONCURRENTLY people_unique_id_idx ON people (id);
+
+	-- +migrate Down
+	DROP INDEX people_unique_id_idx;
+
+# Embedding migrations with packr
+
+If you like your Go applications self-contained (that is, a single binary), use packr (https://github.com/gobuffalo/packr) to embed the migration files.
+
+Just write your migration files as usual, as a set of SQL files in a folder.
+ +Use the PackrMigrationSource in your application to find the migrations: + + migrations := &migrate.PackrMigrationSource{ + Box: packr.NewBox("./migrations"), + } + +If you already have a box and would like to use a subdirectory: + + migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", + } + +# Embedding migrations with bindata + +As an alternative, but slightly less maintained, you can use bindata (https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a .go file with the migrations embedded: + + go-bindata -pkg myapp -o bindata.go db/migrations/ + +The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives). + +Use the AssetMigrationSource in your application to find the migrations: + + migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "db/migrations", + } + +Both Asset and AssetDir are functions provided by bindata. + +Then proceed as usual. + +# Extending + +Adding a new migration source means implementing MigrationSource. + + type MigrationSource interface { + FindMigrations() ([]*Migration, error) + } + +The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field. +*/ +package migrate diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go new file mode 100644 index 000000000000..c9cb4a48b62a --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -0,0 +1,891 @@ +package migrate + +import ( + "bytes" + "context" + "database/sql" + "embed" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/go-gorp/gorp/v3" + + "github.com/rubenv/sql-migrate/sqlparse" +) + +type MigrationDirection int + +const ( + Up MigrationDirection = iota + Down +) + +// MigrationSet provides database parameters for a migration execution +type MigrationSet struct { + // TableName name of the table used to store migration info. + TableName string + // SchemaName schema that the migration table be referenced. + SchemaName string + // IgnoreUnknown skips the check to see if there is a migration + // ran in the database that is not in MigrationSource. + // + // This should be used sparingly as it is removing a safety check. + IgnoreUnknown bool + // DisableCreateTable disable the creation of the migration table + DisableCreateTable bool +} + +var migSet = MigrationSet{} + +// NewMigrationSet returns a parametrized Migration object +func (ms MigrationSet) getTableName() string { + if ms.TableName == "" { + return "gorp_migrations" + } + return ms.TableName +} + +var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`) + +// PlanError happens where no migration plan could be created between the sets +// of already applied migrations and the currently found. For example, when the database +// contains a migration which is not among the migrations list found for an operation. 
+type PlanError struct {
+	Migration    *Migration
+	ErrorMessage string
+}
+
+func newPlanError(migration *Migration, errorMessage string) error {
+	return &PlanError{
+		Migration:    migration,
+		ErrorMessage: errorMessage,
+	}
+}
+
+func (p *PlanError) Error() string {
+	return fmt.Sprintf("Unable to create migration plan because of %s: %s",
+		p.Migration.Id, p.ErrorMessage)
+}
+
+// TxError is returned when any error is encountered during a database
+// transaction. It contains the relevant *Migration and notes its Id in the
+// Error function output.
+type TxError struct {
+	Migration *Migration
+	Err       error
+}
+
+func newTxError(migration *PlannedMigration, err error) error {
+	return &TxError{
+		Migration: migration.Migration,
+		Err:       err,
+	}
+}
+
+func (e *TxError) Error() string {
+	return e.Err.Error() + " handling " + e.Migration.Id
+}
+
+// SetTable sets the name of the table used to store migration info.
+//
+// It should be called before any other call such as (Exec, ExecMax, ...).
+func SetTable(name string) {
+	if name != "" {
+		migSet.TableName = name
+	}
+}
+
+// SetSchema sets the name of the schema that the migration table is referenced in.
+func SetSchema(name string) {
+	if name != "" {
+		migSet.SchemaName = name
+	}
+}
+
+// SetDisableCreateTable sets whether to disable the creation of the migration table
+func SetDisableCreateTable(disable bool) {
+	migSet.DisableCreateTable = disable
+}
+
+// SetIgnoreUnknown sets the flag that skips the database check to see if there is a
+// migration in the database that is not in the migration source.
+//
+// This should be used sparingly as it is removing a safety check.
+func SetIgnoreUnknown(v bool) {
+	migSet.IgnoreUnknown = v
+}
+
+type Migration struct {
+	Id   string
+	Up   []string
+	Down []string
+
+	DisableTransactionUp   bool
+	DisableTransactionDown bool
+}
+
+func (m Migration) Less(other *Migration) bool {
+	switch {
+	case m.isNumeric() && other.isNumeric() && m.VersionInt() != other.VersionInt():
+		return m.VersionInt() < other.VersionInt()
+	case m.isNumeric() && !other.isNumeric():
+		return true
+	case !m.isNumeric() && other.isNumeric():
+		return false
+	default:
+		return m.Id < other.Id
+	}
+}
+
+func (m Migration) isNumeric() bool {
+	return len(m.NumberPrefixMatches()) > 0
+}
+
+func (m Migration) NumberPrefixMatches() []string {
+	return numberPrefixRegex.FindStringSubmatch(m.Id)
+}
+
+func (m Migration) VersionInt() int64 {
+	v := m.NumberPrefixMatches()[1]
+	value, err := strconv.ParseInt(v, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
+	}
+	return value
+}
+
+type PlannedMigration struct {
+	*Migration
+
+	DisableTransaction bool
+	Queries            []string
+}
+
+type byId []*Migration
+
+func (b byId) Len() int           { return len(b) }
+func (b byId) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
+
+type MigrationRecord struct {
+	Id        string    `db:"id"`
+	AppliedAt time.Time `db:"applied_at"`
+}
+
+type OracleDialect struct {
+	gorp.OracleDialect
+}
+
+func (OracleDialect) IfTableNotExists(command, _, _ string) string {
+	return command
+}
+
+func (OracleDialect) IfSchemaNotExists(command, _ string) string {
+	return command
+}
+
+func (OracleDialect) IfTableExists(command, _, _ string) string {
+	return command
+}
+
+var MigrationDialects = map[string]gorp.Dialect{
+	"sqlite3":   gorp.SqliteDialect{},
+	"postgres":  gorp.PostgresDialect{},
+	"mysql":     gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"},
+	"mssql":     gorp.SqlServerDialect{},
+	"oci8":
OracleDialect{}, + "godror": OracleDialect{}, + "snowflake": gorp.SnowflakeDialect{}, +} + +type MigrationSource interface { + // Finds the migrations. + // + // The resulting slice of migrations should be sorted by Id. + FindMigrations() ([]*Migration, error) +} + +// A hardcoded set of migrations, in-memory. +type MemoryMigrationSource struct { + Migrations []*Migration +} + +var _ MigrationSource = (*MemoryMigrationSource)(nil) + +func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) { + // Make sure migrations are sorted. In order to make the MemoryMigrationSource safe for + // concurrent use we should not mutate it in place. So `FindMigrations` would sort a copy + // of the m.Migrations. + migrations := make([]*Migration, len(m.Migrations)) + copy(migrations, m.Migrations) + sort.Sort(byId(migrations)) + return migrations, nil +} + +// A set of migrations loaded from an http.FileServer + +type HttpFileSystemMigrationSource struct { + FileSystem http.FileSystem +} + +var _ MigrationSource = (*HttpFileSystemMigrationSource)(nil) + +func (f HttpFileSystemMigrationSource) FindMigrations() ([]*Migration, error) { + return findMigrations(f.FileSystem, "/") +} + +// A set of migrations loaded from a directory. +type FileMigrationSource struct { + Dir string +} + +var _ MigrationSource = (*FileMigrationSource)(nil) + +func (f FileMigrationSource) FindMigrations() ([]*Migration, error) { + filesystem := http.Dir(f.Dir) + return findMigrations(filesystem, "/") +} + +func findMigrations(dir http.FileSystem, root string) ([]*Migration, error) { + migrations := make([]*Migration, 0) + + file, err := dir.Open(root) + if err != nil { + return nil, err + } + + files, err := file.Readdir(0) + if err != nil { + return nil, err + } + + for _, info := range files { + if strings.HasSuffix(info.Name(), ".sql") { + migration, err := migrationFromFile(dir, root, info) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +func migrationFromFile(dir http.FileSystem, root string, info os.FileInfo) (*Migration, error) { + path := path.Join(root, info.Name()) + file, err := dir.Open(path) + if err != nil { + return nil, fmt.Errorf("Error while opening %s: %w", info.Name(), err) + } + defer func() { _ = file.Close() }() + + migration, err := ParseMigration(info.Name(), file) + if err != nil { + return nil, fmt.Errorf("Error while parsing %s: %w", info.Name(), err) + } + return migration, nil +} + +// Migrations from a bindata asset set. +type AssetMigrationSource struct { + // Asset should return content of file in path if exists + Asset func(path string) ([]byte, error) + + // AssetDir should return list of files in the path + AssetDir func(path string) ([]string, error) + + // Path in the bindata to use. 
+	Dir string
+}
+
+var _ MigrationSource = (*AssetMigrationSource)(nil)
+
+func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	files, err := a.AssetDir(a.Dir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, name := range files {
+		if strings.HasSuffix(name, ".sql") {
+			file, err := a.Asset(path.Join(a.Dir, name))
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(name, bytes.NewReader(file))
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// A set of migrations loaded from a go1.16 embed.FS
+type EmbedFileSystemMigrationSource struct {
+	FileSystem embed.FS
+
+	Root string
+}
+
+var _ MigrationSource = (*EmbedFileSystemMigrationSource)(nil)
+
+func (f EmbedFileSystemMigrationSource) FindMigrations() ([]*Migration, error) {
+	return findMigrations(http.FS(f.FileSystem), f.Root)
+}
+
+// Avoids pulling in the packr library for everyone, mimics the bits of
+// packr.Box that we need.
+type PackrBox interface {
+	List() []string
+	Find(name string) ([]byte, error)
+}
+
+// Migrations from a packr box.
+type PackrMigrationSource struct {
+	Box PackrBox
+
+	// Path in the box to use.
+	Dir string
+}
+
+var _ MigrationSource = (*PackrMigrationSource)(nil)
+
+func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+	items := p.Box.List()
+
+	prefix := ""
+	dir := path.Clean(p.Dir)
+	if dir != "." {
+		prefix = fmt.Sprintf("%s/", dir)
+	}
+
+	for _, item := range items {
+		if !strings.HasPrefix(item, prefix) {
+			continue
+		}
+		name := strings.TrimPrefix(item, prefix)
+		if strings.Contains(name, "/") {
+			continue
+		}
+
+		if strings.HasSuffix(name, ".sql") {
+			file, err := p.Box.Find(item)
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(name, bytes.NewReader(file))
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// Migration parsing
+func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
+	m := &Migration{
+		Id: id,
+	}
+
+	parsed, err := sqlparse.ParseMigration(r)
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing migration (%s): %w", id, err)
+	}
+
+	m.Up = parsed.UpStatements
+	m.Down = parsed.DownStatements
+
+	m.DisableTransactionUp = parsed.DisableTransactionUp
+	m.DisableTransactionDown = parsed.DisableTransactionDown
+
+	return m, nil
+}
+
+type SqlExecutor interface {
+	Exec(query string, args ...interface{}) (sql.Result, error)
+	Insert(list ...interface{}) error
+	Delete(list ...interface{}) (int64, error)
+}
+
+// Execute a set of migrations
+//
+// Returns the number of applied migrations.
+func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
+	return ExecMaxContext(context.Background(), db, dialect, m, dir, 0)
+}
+
+// Returns the number of applied migrations.
+func (ms MigrationSet) Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
+	return ms.ExecMaxContext(context.Background(), db, dialect, m, dir, 0)
+}
+
+// Execute a set of migrations with an input context.
+//
+// Returns the number of applied migrations.
+func ExecContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ExecMaxContext(ctx, db, dialect, m, dir, 0) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ms.ExecMaxContext(ctx, db, dialect, m, dir, 0) +} + +// Execute a set of migrations +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMax(db, dialect, m, dir, max) +} + +// Execute a set of migrations with the given context. +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMaxContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMaxContext(ctx, db, dialect, m, dir, max) +} + +// Execute a set of migrations +// +// Will apply migrations up to and including the target `version`. The target version cannot be negative. +// +// Returns the number of applied migrations. +func ExecVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + return ExecVersionContext(context.Background(), db, dialect, m, dir, version) +} + +// Execute a set of migrations with the given context. +// +// Will apply migrations up to and including the target `version`. The target version cannot be negative. +// +// Returns the number of applied migrations. +func ExecVersionContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + if version < 0 { + return 0, fmt.Errorf("target version %d should not be negative", version) + } + return migSet.ExecVersionContext(ctx, db, dialect, m, dir, version) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return ms.ExecMaxContext(context.Background(), db, dialect, m, dir, max) +} + +// Like ExecMax, but applies the migrations with the given context. Returns the number of applied migrations. +func (ms MigrationSet) ExecMaxContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := ms.PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + return ms.applyMigrations(ctx, dir, migrations, dbMap) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + return ms.ExecVersionContext(context.Background(), db, dialect, m, dir, version) +} + +func (ms MigrationSet) ExecVersionContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + migrations, dbMap, err := ms.PlanMigrationToVersion(db, dialect, m, dir, version) + if err != nil { + return 0, err + } + return ms.applyMigrations(ctx, dir, migrations, dbMap) +} + +// Applies the planned migrations and returns the number of applied migrations.
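Because planning (PlanMigration) is kept separate from applying (applyMigrations, below), a caller can also preview pending work without touching the database. A hedged sketch, reusing the illustrative db and migrations values from the previous example:

```go
// Dry run: compute the plan but do not execute it (max = 0 means no limit).
planned, _, err := migrate.PlanMigration(db, "postgres", migrations, migrate.Up, 0)
if err != nil {
	log.Fatal(err)
}
for _, p := range planned {
	log.Printf("would apply %s (%d statements)", p.Id, len(p.Queries))
}
```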
+func (MigrationSet) applyMigrations(ctx context.Context, dir MigrationDirection, migrations []*PlannedMigration, dbMap *gorp.DbMap) (int, error) { + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + var err error + + if migration.DisableTransaction { + executor = dbMap.WithContext(ctx) + } else { + e, err := dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + executor = e.WithContext(ctx) + } + + for _, stmt := range migration.Queries { + // remove the semicolon from stmt, fix ORA-00922 issue in database oracle + stmt = strings.TrimSuffix(stmt, "\n") + stmt = strings.TrimSuffix(stmt, " ") + stmt = strings.TrimSuffix(stmt, ";") + if _, err := executor.Exec(stmt); err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + } + + switch dir { + case Up: + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + case Down: + _, err := executor.Delete(&MigrationRecord{ + Id: migration.Id, + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + default: + panic("Not possible") + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Plan a migration. +func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigration(db, dialect, m, dir, max) +} + +// Plan a migration to version. +func PlanMigrationToVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigrationToVersion(db, dialect, m, dir, version) +} + +// Plan a migration. +func (ms MigrationSet) PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return ms.planMigrationCommon(db, dialect, m, dir, max, -1) +} + +// Plan a migration to version. +func (ms MigrationSet) PlanMigrationToVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + return ms.planMigrationCommon(db, dialect, m, dir, 0, version) +} + +// A common method to plan a migration. +func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, nil, err + } + + migrations, err := m.FindMigrations() + if err != nil { + return nil, nil, err + } + + var migrationRecords []MigrationRecord + _, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()))) + if err != nil { + return nil, nil, err + } + + // Sort migrations that have been run by Id. 
+ var existingMigrations []*Migration + for _, migrationRecord := range migrationRecords { + existingMigrations = append(existingMigrations, &Migration{ + Id: migrationRecord.Id, + }) + } + sort.Sort(byId(existingMigrations)) + + // Make sure all migrations in the database are among the found migrations which + // are to be applied. + if !ms.IgnoreUnknown { + migrationsSearch := make(map[string]struct{}) + for _, migration := range migrations { + migrationsSearch[migration.Id] = struct{}{} + } + for _, existingMigration := range existingMigrations { + if _, ok := migrationsSearch[existingMigration.Id]; !ok { + return nil, nil, newPlanError(existingMigration, "unknown migration in database") + } + } + } + + // Get last migration that was run + record := &Migration{} + if len(existingMigrations) > 0 { + record = existingMigrations[len(existingMigrations)-1] + } + + result := make([]*PlannedMigration, 0) + + // Add missing migrations up to the last run migration. + // This can happen for example when merges happened. + if len(existingMigrations) > 0 { + result = append(result, ToCatchup(migrations, existingMigrations, record)...) + } + + // Figure out which migrations to apply + toApply := ToApply(migrations, record.Id, dir) + toApplyCount := len(toApply) + + if version >= 0 { + targetIndex := 0 + for targetIndex < len(toApply) { + tempVersion := toApply[targetIndex].VersionInt() + if dir == Up && tempVersion > version || dir == Down && tempVersion < version { + return nil, nil, newPlanError(&Migration{}, fmt.Errorf("unknown migration with version id %d in database", version).Error()) + } + if tempVersion == version { + toApplyCount = targetIndex + 1 + break + } + targetIndex++ + } + if targetIndex == len(toApply) { + return nil, nil, newPlanError(&Migration{}, fmt.Errorf("unknown migration with version id %d in database", version).Error()) + } + } else if max > 0 && max < toApplyCount { + toApplyCount = max + } + for _, v := range toApply[0:toApplyCount] { + switch dir { + case Up: + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Up, + DisableTransaction: v.DisableTransactionUp, + }) + case Down: + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Down, + DisableTransaction: v.DisableTransactionDown, + }) + } + } + + return result, dbMap, nil +} + +// Skip a set of migrations +// +// Will skip at most `max` migrations. Pass 0 for no limit. +// +// Returns the number of skipped migrations. +func SkipMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Skip migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Filter a slice of migrations into ones that should be applied. 
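To make ToApply's contract concrete before its implementation, an illustrative trace (the file names are hypothetical; migrations are assumed sorted by Id):

```go
// Sorted sources: 1_init.sql, 2_users.sql, 3_posts.sql; the most recently
// applied migration is "2_users.sql".
up := ToApply(migrations, "2_users.sql", Up)     // -> [3_posts.sql]
down := ToApply(migrations, "2_users.sql", Down) // -> [2_users.sql, 1_init.sql]
```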
+func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration { + index := -1 + if current != "" { + for index < len(migrations)-1 { + index++ + if migrations[index].Id == current { + break + } + } + } + + switch direction { + case Up: + return migrations[index+1:] + case Down: + if index == -1 { + return []*Migration{} + } + toApply := make([]*Migration, index+1) + for i := 0; i < index+1; i++ { + toApply[index-i] = migrations[i] + } + return toApply + } + + panic("Not possible") +} + +func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration { + missing := make([]*PlannedMigration, 0) + for _, migration := range migrations { + found := false + for _, existing := range existingMigrations { + if existing.Id == migration.Id { + found = true + break + } + } + if !found && migration.Less(lastRun) { + missing = append(missing, &PlannedMigration{ + Migration: migration, + Queries: migration.Up, + DisableTransaction: migration.DisableTransactionUp, + }) + } + } + return missing +} + +func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + return migSet.GetMigrationRecords(db, dialect) +} + +func (ms MigrationSet) GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, err + } + + var records []*MigrationRecord + query := fmt.Sprintf("SELECT * FROM %s ORDER BY %s ASC", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()), dbMap.Dialect.QuoteField("id")) + _, err = dbMap.Select(&records, query) + if err != nil { + return nil, err + } + + return records, nil +} + +func (ms MigrationSet) getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) { + d, ok := MigrationDialects[dialect] + if !ok { + return nil, fmt.Errorf("Unknown dialect: %s", dialect) + } + + // When using the mysql driver, make sure that the parseTime option is + // configured, otherwise it won't map time columns to time.Time. See + // https://github.com/rubenv/sql-migrate/issues/2 + if dialect == "mysql" { + var out *time.Time + err := db.QueryRow("SELECT NOW()").Scan(&out) + if err != nil { + if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" || + err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" || + err.Error() == "sql: Scan error on column index 0, name \"NOW()\": unsupported Scan, storing driver.Value type []uint8 into type *time.Time" { + return nil, errors.New(`Cannot parse dates. + +Make sure that the parseTime option is supplied to your database connection. +Check https://github.com/go-sql-driver/mysql#parsetime for more info.`) + } + return nil, err + } + } + + // Create migration database map + dbMap := &gorp.DbMap{Db: db, Dialect: d} + table := dbMap.AddTableWithNameAndSchema(MigrationRecord{}, ms.SchemaName, ms.getTableName()).SetKeys(false, "Id") + + if dialect == "oci8" || dialect == "godror" { + table.ColMap("Id").SetMaxSize(4000) + } + + if ms.DisableCreateTable { + return dbMap, nil + } + + err := dbMap.CreateTablesIfNotExists() + if err != nil { + // Oracle database does not support `if not exists`, so use `ORA-00955:` error code + // to check if the table exists. 
+ if (dialect == "oci8" || dialect == "godror") && strings.Contains(err.Error(), "ORA-00955:") { + return dbMap, nil + } + return nil, err + } + + return dbMap, nil +} + +// TODO: Run migration + record insert in transaction. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE new file mode 100644 index 000000000000..9c1252513871 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2014-2017 by Ruben Vermeersch +Copyright (C) 2012-2014 by Liam Staskawicz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md new file mode 100644 index 000000000000..fa5341abdb0f --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md @@ -0,0 +1,7 @@ +# SQL migration parser + +Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser. + +## License + +This library is distributed under the [MIT](LICENSE) license. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go new file mode 100644 index 000000000000..f04460e6e20b --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go @@ -0,0 +1,226 @@ +package sqlparse + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +const ( + sqlCmdPrefix = "-- +migrate " + optionNoTransaction = "notransaction" +) + +type ParsedMigration struct { + UpStatements []string + DownStatements []string + + DisableTransactionUp bool + DisableTransactionDown bool +} + +// LineSeparator can be used to split migrations by an exact line match. This line +// will be removed from the output. If left blank, it is not considered. It is defaulted +// to blank so you will have to set it manually. +// Use case: in MSSQL, it is convenient to separate commands by GO statements like in +// SQL Query Analyzer. +var LineSeparator = "" + +func errNoTerminator() error { + if len(LineSeparator) == 0 { + return fmt.Errorf(`ERROR: The last statement must be ended by a semicolon or '-- +migrate StatementEnd' marker. 
+ See https://github.com/rubenv/sql-migrate for details.`) + } + + return fmt.Errorf(`ERROR: The last statement must be ended by a semicolon, a line whose contents are %q, or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`, LineSeparator) +} + +// Checks the line to see if the line has a statement-ending semicolon +// or if the line contains a double-dash comment. +func endsWithSemicolon(line string) bool { + prev := "" + scanner := bufio.NewScanner(strings.NewReader(line)) + scanner.Split(bufio.ScanWords) + + for scanner.Scan() { + word := scanner.Text() + if strings.HasPrefix(word, "--") { + break + } + prev = word + } + + return strings.HasSuffix(prev, ";") +} + +type migrationDirection int + +const ( + directionNone migrationDirection = iota + directionUp + directionDown +) + +type migrateCommand struct { + Command string + Options []string +} + +func (c *migrateCommand) HasOption(opt string) bool { + for _, specifiedOption := range c.Options { + if specifiedOption == opt { + return true + } + } + + return false +} + +func parseCommand(line string) (*migrateCommand, error) { + cmd := &migrateCommand{} + + if !strings.HasPrefix(line, sqlCmdPrefix) { + return nil, fmt.Errorf("ERROR: not a sql-migrate command") + } + + fields := strings.Fields(line[len(sqlCmdPrefix):]) + if len(fields) == 0 { + return nil, fmt.Errorf(`ERROR: incomplete migration command`) + } + + cmd.Command = fields[0] + + cmd.Options = fields[1:] + + return cmd, nil +} + +// Split the given sql script into individual statements. +// +// The base case is to simply split on semicolons, as these +// naturally terminate a statement. +// +// However, more complex cases like pl/pgsql can have semicolons +// within a statement. For these cases, we provide the explicit annotations +// 'StatementBegin' and 'StatementEnd' to allow the script to +// tell us to ignore semicolons. 
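A concrete input makes the annotation scheme easier to follow. The sketch below (hypothetical migration content, not taken from this diff) feeds the parser a migration whose pl/pgsql body contains semicolons, protected by StatementBegin/StatementEnd:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/rubenv/sql-migrate/sqlparse"
)

func main() {
	src := strings.NewReader(`-- +migrate Up
-- +migrate StatementBegin
CREATE FUNCTION add_user() RETURNS void AS $$
BEGIN
    INSERT INTO users DEFAULT VALUES; -- inner semicolons do not split the statement
END;
$$ LANGUAGE plpgsql;
-- +migrate StatementEnd

-- +migrate Down
DROP FUNCTION add_user();
`)

	parsed, err := sqlparse.ParseMigration(src)
	if err != nil {
		log.Fatal(err)
	}
	// One Up statement (the whole function definition) and one Down statement.
	fmt.Println(len(parsed.UpStatements), len(parsed.DownStatements)) // 1 1
}
```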
+func ParseMigration(r io.ReadSeeker) (*ParsedMigration, error) { + p := &ParsedMigration{} + + _, err := r.Seek(0, 0) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + statementEnded := false + ignoreSemicolons := false + currentDirection := directionNone + + for scanner.Scan() { + line := scanner.Text() + // ignore comment except beginning with '-- +' + if strings.HasPrefix(line, "-- ") && !strings.HasPrefix(line, "-- +") { + continue + } + + // handle any migrate-specific commands + if strings.HasPrefix(line, sqlCmdPrefix) { + cmd, err := parseCommand(line) + if err != nil { + return nil, err + } + + switch cmd.Command { + case "Up": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionUp + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionUp = true + } + + case "Down": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionDown + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionDown = true + } + + case "StatementBegin": + if currentDirection != directionNone { + ignoreSemicolons = true + } + + case "StatementEnd": + if currentDirection != directionNone { + statementEnded = ignoreSemicolons + ignoreSemicolons = false + } + } + } + + if currentDirection == directionNone { + continue + } + + isLineSeparator := !ignoreSemicolons && len(LineSeparator) > 0 && line == LineSeparator + + if !isLineSeparator && !strings.HasPrefix(line, "-- +") { + if _, err := buf.WriteString(line + "\n"); err != nil { + return nil, err + } + } + + // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement + // Lines that end with semicolon that are in a statement block + // do not conclude statement. + if (!ignoreSemicolons && (endsWithSemicolon(line) || isLineSeparator)) || statementEnded { + statementEnded = false + switch currentDirection { + case directionUp: + p.UpStatements = append(p.UpStatements, buf.String()) + + case directionDown: + p.DownStatements = append(p.DownStatements, buf.String()) + + default: + panic("impossible state") + } + + buf.Reset() + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // diagnose likely migration script errors + if ignoreSemicolons { + return nil, fmt.Errorf("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") + } + + if currentDirection == directionNone { + return nil, fmt.Errorf(`ERROR: no Up/Down annotations found, so no statements were executed. + See https://github.com/rubenv/sql-migrate for details.`) + } + + // allow comment without sql instruction. Example: + // -- +migrate Down + // -- nothing to downgrade! 
+ if len(strings.TrimSpace(buf.String())) > 0 && !strings.HasPrefix(buf.String(), "-- +") { + return nil, errNoTerminator() + } + + return p, nil +} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore new file mode 100644 index 000000000000..ff36b987f070 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.gitignore @@ -0,0 +1,9 @@ +.git +*.swp + +# IntelliJ +.idea/ +*.iml + +# VS code +*.code-workspace diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md new file mode 100644 index 000000000000..432d0fd4ea61 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md @@ -0,0 +1,76 @@ +## Decimal v1.4.0 +#### BREAKING +- Drop support for Go version older than 1.10 [#361](https://github.com/shopspring/decimal/pull/361) + +#### FEATURES +- Add implementation of natural logarithm [#339](https://github.com/shopspring/decimal/pull/339) [#357](https://github.com/shopspring/decimal/pull/357) +- Add improved implementation of power operation [#358](https://github.com/shopspring/decimal/pull/358) +- Add Compare method which forwards calls to Cmp [#346](https://github.com/shopspring/decimal/pull/346) +- Add NewFromBigRat constructor [#288](https://github.com/shopspring/decimal/pull/288) +- Add NewFromUint64 constructor [#352](https://github.com/shopspring/decimal/pull/352) + +#### ENHANCEMENTS +- Migrate to Github Actions [#245](https://github.com/shopspring/decimal/pull/245) [#340](https://github.com/shopspring/decimal/pull/340) +- Fix examples for RoundDown, RoundFloor, RoundUp, and RoundCeil [#285](https://github.com/shopspring/decimal/pull/285) [#328](https://github.com/shopspring/decimal/pull/328) [#341](https://github.com/shopspring/decimal/pull/341) +- Use Godoc standard to mark deprecated Equals and StringScaled methods [#342](https://github.com/shopspring/decimal/pull/342) +- Removed unnecessary min function for RescalePair method [#265](https://github.com/shopspring/decimal/pull/265) +- Avoid reallocation of initial slice in MarshalBinary (GobEncode) [#355](https://github.com/shopspring/decimal/pull/355) +- Optimize NumDigits method [#301](https://github.com/shopspring/decimal/pull/301) [#356](https://github.com/shopspring/decimal/pull/356) +- Optimize BigInt method [#359](https://github.com/shopspring/decimal/pull/359) +- Support scanning uint64 [#131](https://github.com/shopspring/decimal/pull/131) [#364](https://github.com/shopspring/decimal/pull/364) +- Add docs section with alternative libraries [#363](https://github.com/shopspring/decimal/pull/363) + +#### BUGFIXES +- Fix incorrect calculation of decimal modulo [#258](https://github.com/shopspring/decimal/pull/258) [#317](https://github.com/shopspring/decimal/pull/317) +- Allocate new(big.Int) in Copy method to deeply clone it [#278](https://github.com/shopspring/decimal/pull/278) +- Fix overflow edge case in QuoRem method [#322](https://github.com/shopspring/decimal/pull/322) + +## Decimal v1.3.1 + +#### ENHANCEMENTS +- Reduce memory allocation in case of initialization from 
big.Int [#252](https://github.com/shopspring/decimal/pull/252) + +#### BUGFIXES +- Fix binary marshalling of decimal zero value [#253](https://github.com/shopspring/decimal/pull/253) + +## Decimal v1.3.0 + +#### FEATURES +- Add NewFromFormattedString initializer [#184](https://github.com/shopspring/decimal/pull/184) +- Add NewNullDecimal initializer [#234](https://github.com/shopspring/decimal/pull/234) +- Add implementation of natural exponent function (Taylor, Hull-Abraham) [#229](https://github.com/shopspring/decimal/pull/229) +- Add RoundUp, RoundDown, RoundCeil, RoundFloor methods [#196](https://github.com/shopspring/decimal/pull/196) [#202](https://github.com/shopspring/decimal/pull/202) [#220](https://github.com/shopspring/decimal/pull/220) +- Add XML support for NullDecimal [#192](https://github.com/shopspring/decimal/pull/192) +- Add IsInteger method [#179](https://github.com/shopspring/decimal/pull/179) +- Add Copy helper method [#123](https://github.com/shopspring/decimal/pull/123) +- Add InexactFloat64 helper method [#205](https://github.com/shopspring/decimal/pull/205) +- Add CoefficientInt64 helper method [#244](https://github.com/shopspring/decimal/pull/244) + +#### ENHANCEMENTS +- Performance optimization of NewFromString init method [#198](https://github.com/shopspring/decimal/pull/198) +- Performance optimization of Abs and Round methods [#240](https://github.com/shopspring/decimal/pull/240) +- Additional tests (CI) for ppc64le architecture [#188](https://github.com/shopspring/decimal/pull/188) + +#### BUGFIXES +- Fix rounding in FormatFloat fallback path (roundShortest method, fix taken from Go main repository) [#161](https://github.com/shopspring/decimal/pull/161) +- Add slice range checks to UnmarshalBinary method [#232](https://github.com/shopspring/decimal/pull/232) + +## Decimal v1.2.0 + +#### BREAKING +- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) + +#### FEATURES +- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) +- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) +- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) + +#### ENHANCEMENTS +- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) +- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) +- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) +- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) + +#### BUGFIXES +- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) +- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE new file mode 100644 index 000000000000..ad2148aaf93e --- /dev/null +++ b/vendor/github.com/shopspring/decimal/LICENSE @@ -0,0 +1,45 @@ +The MIT License (MIT) + +Copyright (c) 2015 Spring, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +- Based on https://github.com/oguzbilgic/fpd, which has the following license: +""" +The MIT License (MIT) + +Copyright (c) 2013 Oguz Bilgic + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md new file mode 100644 index 000000000000..318c9df5813d --- /dev/null +++ b/vendor/github.com/shopspring/decimal/README.md @@ -0,0 +1,139 @@ +# decimal + +[![ci](https://github.com/shopspring/decimal/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/shopspring/decimal/actions/workflows/ci.yml) +[![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) +[![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) + +Arbitrary-precision fixed-point decimal numbers in go. + +_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
+ +## Features + + * The zero-value is 0, and is safe to use without initialization + * Addition, subtraction, multiplication with no loss of precision + * Division with specified precision + * Database/sql serialization/deserialization + * JSON and XML serialization/deserialization + +## Install + +Run `go get github.com/shopspring/decimal` + +## Requirements + +Decimal library requires Go version `>=1.10` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/shopspring/decimal" +) + +func main() { + price, err := decimal.NewFromString("136.02") + if err != nil { + panic(err) + } + + quantity := decimal.NewFromInt(3) + + fee, _ := decimal.NewFromString(".035") + taxRate, _ := decimal.NewFromString(".08875") + + subtotal := price.Mul(quantity) + + preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) + + total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) + + fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 + fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 + fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 + fmt.Println("Total:", total) // Total: 459.824961375 + fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 +} +``` + +## Alternative libraries + +When working with decimal numbers, you might face problems this library is not perfectly suited for. +Fortunately, thanks to the wonderful community we have a dozen other libraries that you can choose from. +Explore other alternatives to find the one that best fits your needs :) + +* [cockroachdb/apd](https://github.com/cockroachdb/apd) - arbitrary precision, mutable and rich API similar to `big.Int`, more performant than this library +* [alpacahq/alpacadecimal](https://github.com/alpacahq/alpacadecimal) - high performance, low precision (12 digits), fully compatible API with this library +* [govalues/decimal](https://github.com/govalues/decimal) - high performance, zero-allocation, low precision (19 digits) +* [greatcloak/decimal](https://github.com/greatcloak/decimal) - fork focusing on billing and e-commerce web application related use cases, includes out-of-the-box BSON marshaling support + +## FAQ + +#### Why don't you just use float64? + +Because float64 (or any binary floating point type, actually) can't represent +numbers such as `0.1` exactly. + +Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that +it prints out `10`, but it actually prints `9.999999999999831`. Over time, +these small errors can really add up! + +#### Why don't you just use big.Rat? + +big.Rat is fine for representing rational numbers, but Decimal is better for +representing money. Why? Here's a (contrived) example: + +Let's say you use big.Rat, and you have two numbers, x and y, both +representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one +out, the string output has to stop somewhere (let's say it stops at 3 decimal +digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did +the other 0.001 go? + +Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE + +With Decimal, the strings being printed out represent the number exactly. So, +if you have `x = y = 1/3` (with precision 3), they will actually be equal to +0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is +unaccounted for! + +You still have to be careful. 
If you want to split a number `N` 3 ways, you +can't just send `N/3` to three different people. You have to pick one to send +`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. + +But, it is much easier to be careful with Decimal than with big.Rat. + +#### Why isn't the API similar to big.Int's? + +big.Int's API is built to reduce the number of memory allocations for maximal +performance. This makes sense for its use-case, but the trade-off is that the +API is awkward and easy to misuse. + +For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A +developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This +modifies `a` and sets `z` as an alias for `a`, which they might not expect. It +also modifies any other aliases to `a`. + +Here's an example of the subtle bugs you can introduce with big.Int's API: +https://play.golang.org/p/x2R_78pa8r + +In contrast, it's difficult to make such mistakes with decimal. Decimals +behave like other go numbers types: even though `a = b` will not deep copy +`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods +return new Decimals and do not modify the originals. The downside is that +this causes extra allocations, so Decimal is less performant. My assumption +is that if you're using Decimals, you probably care more about correctness +than performance. + +## License + +The MIT License (MIT) + +This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/const.go b/vendor/github.com/shopspring/decimal/const.go new file mode 100644 index 000000000000..e5d6fa87e8dd --- /dev/null +++ b/vendor/github.com/shopspring/decimal/const.go @@ -0,0 +1,63 @@ +package decimal + +import ( + "strings" +) + +const ( + strLn10 = 
"2.302585092994045684017991454684364207601101488628772976033327900967572609677352480235997205089598298341967784042286248633409525465082806756666287369098781689482907208325554680843799894826233198528393505308965377732628846163366222287698219886746543667474404243274365155048934314939391479619404400222105101714174800368808401264708068556774321622835522011480466371565912137345074785694768346361679210180644507064800027750268491674655058685693567342067058113642922455440575892572420824131469568901675894025677631135691929203337658714166023010570308963457207544037084746994016826928280848118428931484852494864487192780967627127577539702766860595249671667418348570442250719796500471495105049221477656763693866297697952211071826454973477266242570942932258279850258550978526538320760672631716430950599508780752371033310119785754733154142180842754386359177811705430982748238504564801909561029929182431823752535770975053956518769751037497088869218020518933950723853920514463419726528728696511086257149219884997874887377134568620916705849807828059751193854445009978131146915934666241071846692310107598438319191292230792503747298650929009880391941702654416816335727555703151596113564846546190897042819763365836983716328982174407366009162177850541779276367731145041782137660111010731042397832521894898817597921798666394319523936855916447118246753245630912528778330963604262982153040874560927760726641354787576616262926568298704957954913954918049209069438580790032763017941503117866862092408537949861264933479354871737451675809537088281067452440105892444976479686075120275724181874989395971643105518848195288330746699317814634930000321200327765654130472621883970596794457943468343218395304414844803701305753674262153675579814770458031413637793236291560128185336498466942261465206459942072917119370602444929358037007718981097362533224548366988505528285966192805098447175198503666680874970496982273220244823343097169111136813588418696549323714996941979687803008850408979618598756579894836445212043698216415292987811742973332588607915912510967187510929248475023930572665446276200923068791518135803477701295593646298412366497023355174586195564772461857717369368404676577047874319780573853271810933883496338813069945569399346101090745616033312247949360455361849123333063704751724871276379140924398331810164737823379692265637682071706935846394531616949411701841938119405416449466111274712819705817783293841742231409930022911502362192186723337268385688273533371925103412930705632544426611429765388301822384091026198582888433587455960453004548370789052578473166283701953392231047527564998119228742789713715713228319641003422124210082180679525276689858180956119208391760721080919923461516952599099473782780648128058792731993893453415320185969711021407542282796298237068941764740642225757212455392526179373652434440560595336591539160312524480149313234572453879524389036839236450507881731359711238145323701508413491122324390927681724749607955799151363982881058285740538000653371655553014196332241918087621018204919492651483892" +) + +var ( + ln10 = newConstApproximation(strLn10) +) + +type constApproximation struct { + exact Decimal + approximations []Decimal +} + +func newConstApproximation(value string) constApproximation { + parts := strings.Split(value, ".") + coeff, fractional := parts[0], parts[1] + + coeffLen := len(coeff) + maxPrecision := len(fractional) + + var approximations []Decimal + for p := 1; p < maxPrecision; p *= 2 { + r := RequireFromString(value[:coeffLen+p]) + approximations = append(approximations, r) + } + + return constApproximation{ + 
RequireFromString(value), + approximations, + } +} + +// Returns the smallest approximation available that's at least as precise +// as the passed precision (places after decimal point), i.e. Floor[ log2(precision) ] + 1 +func (c constApproximation) withPrecision(precision int32) Decimal { + i := 0 + + if precision >= 1 { + i++ + } + + for precision >= 16 { + precision /= 16 + i += 4 + } + + for precision >= 2 { + precision /= 2 + i++ + } + + if i >= len(c.approximations) { + return c.exact + } + + return c.approximations[i] +} diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go new file mode 100644 index 000000000000..9958d6902063 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal-go.go @@ -0,0 +1,415 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type decimal struct { + d [800]byte // digits, big-endian representation + nd int // number of digits used + dp int // decimal point + neg bool // negative flag + trunc bool // discarded nonzero digits beyond d[:nd] +} + +func (a *decimal) String() string { + n := 10 + a.nd + if a.dp > 0 { + n += a.dp + } + if a.dp < 0 { + n += -a.dp + } + + buf := make([]byte, n) + w := 0 + switch { + case a.nd == 0: + return "0" + + case a.dp <= 0: + // zeros fill space between decimal point and digits + buf[w] = '0' + w++ + buf[w] = '.' + w++ + w += digitZero(buf[w : w+-a.dp]) + w += copy(buf[w:], a.d[0:a.nd]) + + case a.dp < a.nd: + // decimal point in middle of digits + w += copy(buf[w:], a.d[0:a.dp]) + buf[w] = '.' + w++ + w += copy(buf[w:], a.d[a.dp:a.nd]) + + default: + // zeros fill space between digits and decimal point + w += copy(buf[w:], a.d[0:a.nd]) + w += digitZero(buf[w : w+a.dp-a.nd]) + } + return string(buf[0:w]) +} + +func digitZero(dst []byte) int { + for i := range dst { + dst[i] = '0' + } + return len(dst) +} + +// trim trailing zeros from number. +// (They are meaningless; the decimal point is tracked +// independent of the number of digits.) +func trim(a *decimal) { + for a.nd > 0 && a.d[a.nd-1] == '0' { + a.nd-- + } + if a.nd == 0 { + a.dp = 0 + } +} + +// Assign v to a. +func (a *decimal) Assign(v uint64) { + var buf [24]byte + + // Write reversed decimal in buf. + n := 0 + for v > 0 { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n++ + v = v1 + } + + // Reverse again to produce forward decimal in a.d. + a.nd = 0 + for n--; n >= 0; n-- { + a.d[a.nd] = buf[n] + a.nd++ + } + a.dp = a.nd + trim(a) +} + +// Maximum shift that we can do in one pass without overflow. +// A uint has 32 or 64 bits, and we have to be able to accommodate 9<<k. +const uintSize = 32 << (^uint(0) >> 63) +const maxShift = uintSize - 4 + +// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. +func rightShift(a *decimal, k uint) { + r := 0 // read pointer + w := 0 // write pointer + + // Pick up enough leading digits to cover first shift. + var n uint + for ; n>>k == 0; r++ { + if r >= a.nd { + if n == 0 { + // a == 0; shouldn't get here, but handle anyway.
+ a.nd = 0 + return + } + for n>>k == 0 { + n = n * 10 + r++ + } + break + } + c := uint(a.d[r]) + n = n*10 + c - '0' + } + a.dp -= r - 1 + + var mask uint = (1 << k) - 1 + + // Pick up a digit, put down a digit. + for ; r < a.nd; r++ { + c := uint(a.d[r]) + dig := n >> k + n &= mask + a.d[w] = byte(dig + '0') + w++ + n = n*10 + c - '0' + } + + // Put down extra digits. + for n > 0 { + dig := n >> k + n &= mask + if w < len(a.d) { + a.d[w] = byte(dig + '0') + w++ + } else if dig > 0 { + a.trunc = true + } + n = n * 10 + } + + a.nd = w + trim(a) +} + +// Cheat sheet for left shift: table indexed by shift count giving +// number of new digits that will be introduced by that shift. +// +// For example, leftcheats[4] = {2, "625"}. That means that +// if we are shifting by 4 (multiplying by 16), it will add 2 digits +// when the string prefix is "625" through "999", and one fewer digit +// if the string prefix is "000" through "624". +// +// Credit for this trick goes to Ken. + +type leftCheat struct { + delta int // number of new digits + cutoff string // minus one digit if original < a. +} + +var leftcheats = []leftCheat{ + // Leading digits of 1/2^i = 5^i. + // 5^23 is not an exact 64-bit floating point number, + // so have to use bc for the math. + // Go up to 60 to be large enough for 32bit and 64bit platforms. + /* + seq 60 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\t{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\t{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 + {9, "37252902984619140625"}, // * 268435456 + {9, "186264514923095703125"}, // * 536870912 + {10, "931322574615478515625"}, // * 1073741824 + {10, "4656612873077392578125"}, // * 2147483648 + {10, "23283064365386962890625"}, // * 4294967296 + {10, "116415321826934814453125"}, // * 8589934592 + {11, "582076609134674072265625"}, // * 17179869184 + {11, "2910383045673370361328125"}, // * 34359738368 + {11, "14551915228366851806640625"}, // * 68719476736 + {12, "72759576141834259033203125"}, // * 137438953472 + {12, "363797880709171295166015625"}, // * 274877906944 + {12, "1818989403545856475830078125"}, // * 549755813888 + {13, "9094947017729282379150390625"}, // * 1099511627776 + {13, "45474735088646411895751953125"}, // * 2199023255552 + {13, "227373675443232059478759765625"}, // * 4398046511104 + {13, "1136868377216160297393798828125"}, // * 8796093022208 + {14, "5684341886080801486968994140625"}, // * 17592186044416 + {14, "28421709430404007434844970703125"}, // * 35184372088832 + {14, "142108547152020037174224853515625"}, // * 70368744177664 + {15, "710542735760100185871124267578125"}, // * 140737488355328 + {15, 
"3552713678800500929355621337890625"}, // * 281474976710656 + {15, "17763568394002504646778106689453125"}, // * 562949953421312 + {16, "88817841970012523233890533447265625"}, // * 1125899906842624 + {16, "444089209850062616169452667236328125"}, // * 2251799813685248 + {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 + {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 + {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 + {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 + {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 + {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 + {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 + {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 + {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + + // Pick up a digit, put down a digit. + var n uint + for r--; r >= 0; r-- { + n += (uint(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). 
+func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go new file mode 100644 index 000000000000..a37a2301ef0c --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -0,0 +1,2339 @@ +// Package decimal implements an arbitrary precision fixed-point decimal. +// +// The zero-value of a Decimal is 0, as you would expect. +// +// The best way to create a new Decimal is to use decimal.NewFromString, ex: +// +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" +// +// To use Decimal as part of a struct: +// +// type StructName struct { +// Number Decimal +// } +// +// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. +package decimal + +import ( + "database/sql/driver" + "encoding/binary" + "fmt" + "math" + "math/big" + "regexp" + "strconv" + "strings" +) + +// DivisionPrecision is the number of decimal places in the result when it +// doesn't divide exactly. +// +// Example: +// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" +var DivisionPrecision = 16 + +// PowPrecisionNegativeExponent specifies the maximum precision of the result (digits after decimal point) +// when calculating decimal power. Only used for cases where the exponent is a negative number. +// This constant applies to Pow, PowInt32 and PowBigInt methods, PowWithPrecision method is not constrained by it. +// +// Example: +// +// d1, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d1.String() // output: "0.0043282548476454" +// +// decimal.PowPrecisionNegativeExponent = 24 +// d2, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d2.String() // output: "0.004328254847645429362881" +var PowPrecisionNegativeExponent = 16 + +// MarshalJSONWithoutQuotes should be set to true if you want the decimal to +// be JSON marshaled as a number, instead of as a string. +// WARNING: this is dangerous for decimals with many digits, since many JSON +// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 +// double-precision floating point numbers, which means you can potentially +// silently lose precision. +var MarshalJSONWithoutQuotes = false + +// ExpMaxIterations specifies the maximum number of iterations needed to calculate +// precise natural exponent value using ExpHullAbrham method. 
+var ExpMaxIterations = 1000 + +// Zero constant, to make computations faster. +// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. +var Zero = New(0, 1) + +var zeroInt = big.NewInt(0) +var oneInt = big.NewInt(1) +var twoInt = big.NewInt(2) +var fourInt = big.NewInt(4) +var fiveInt = big.NewInt(5) +var tenInt = big.NewInt(10) +var twentyInt = big.NewInt(20) + +var factorials = []Decimal{New(1, 0)} + +// Decimal represents a fixed-point decimal. It is immutable. +// number = value * 10 ^ exp +type Decimal struct { + value *big.Int + + // NOTE(vadim): this must be an int32, because we cast it to float64 during + // calculations. If exp is 64 bit, we might lose precision. + // If we cared about being able to represent every possible decimal, we + // could make exp a *big.Int but it would hurt performance and numbers + // like that are unrealistic. + exp int32 +} + +// New returns a new fixed-point decimal, value * 10 ^ exp. +func New(value int64, exp int32) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: exp, + } +} + +// NewFromInt converts an int64 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt(value int64) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: 0, + } +} + +// NewFromInt32 converts an int32 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt32(value int32) Decimal { + return Decimal{ + value: big.NewInt(int64(value)), + exp: 0, + } +} + +// NewFromUint64 converts an uint64 to Decimal. +// +// Example: +// +// NewFromUint64(123).String() // output: "123" +func NewFromUint64(value uint64) Decimal { + return Decimal{ + value: new(big.Int).SetUint64(value), + exp: 0, + } +} + +// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp +func NewFromBigInt(value *big.Int, exp int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value), + exp: exp, + } +} + +// NewFromBigRat returns a new Decimal from a big.Rat. The numerator and +// denominator are divided and rounded to the given precision. +// +// Example: +// +// d1 := NewFromBigRat(big.NewRat(0, 1), 0) // output: "0" +// d2 := NewFromBigRat(big.NewRat(4, 5), 1) // output: "0.8" +// d3 := NewFromBigRat(big.NewRat(1000, 3), 3) // output: "333.333" +// d4 := NewFromBigRat(big.NewRat(2, 7), 4) // output: "0.2857" +func NewFromBigRat(value *big.Rat, precision int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value.Num()), + exp: 0, + }.DivRound(Decimal{ + value: new(big.Int).Set(value.Denom()), + exp: 0, + }, precision) +} + +// NewFromString returns a new Decimal from a string representation. +// Trailing zeroes are not trimmed. 
+// +// Example: +// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") +func NewFromString(value string) (Decimal, error) { + originalInput := value + var intString string + var exp int64 + + // Check if number is using scientific notation + eIndex := strings.IndexAny(value, "Ee") + if eIndex != -1 { + expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) + } + return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) + } + value = value[:eIndex] + exp = expInt + } + + pIndex := -1 + vLen := len(value) + for i := 0; i < vLen; i++ { + if value[i] == '.' { + if pIndex > -1 { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) + } + pIndex = i + } + } + + if pIndex == -1 { + // There is no decimal point, we can just parse the original string as + // an int + intString = value + } else { + if pIndex+1 < vLen { + intString = value[:pIndex] + value[pIndex+1:] + } else { + intString = value[:pIndex] + } + expInt := -len(value[pIndex+1:]) + exp += int64(expInt) + } + + var dValue *big.Int + // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow + if len(intString) <= 18 { + parsed64, err := strconv.ParseInt(intString, 10, 64) + if err != nil { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + dValue = big.NewInt(parsed64) + } else { + dValue = new(big.Int) + _, ok := dValue.SetString(intString, 10) + if !ok { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + } + + if exp < math.MinInt32 || exp > math.MaxInt32 { + // NOTE(vadim): I doubt a string could realistically be this long + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) + } + + return Decimal{ + value: dValue, + exp: int32(exp), + }, nil +} + +// NewFromFormattedString returns a new Decimal from a formatted string representation. +// The second argument - replRegexp, is a regular expression that is used to find characters that should be +// removed from given decimal string representation. All matched characters will be replaced with an empty string. +// +// Example: +// +// r := regexp.MustCompile("[$,]") +// d1, err := NewFromFormattedString("$5,125.99", r) +// +// r2 := regexp.MustCompile("[_]") +// d2, err := NewFromFormattedString("1_000_000", r2) +// +// r3 := regexp.MustCompile("[USD\\s]") +// d3, err := NewFromFormattedString("5000 USD", r3) +func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { + parsedValue := replRegexp.ReplaceAllString(value, "") + d, err := NewFromString(parsedValue) + if err != nil { + return Decimal{}, err + } + return d, nil +} + +// RequireFromString returns a new Decimal from a string representation +// or panics if NewFromString had returned an error. +// +// Example: +// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") +func RequireFromString(value string) Decimal { + dec, err := NewFromString(value) + if err != nil { + panic(err) + } + return dec +} + +// NewFromFloat converts a float64 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. 
+// This is typically 15 digits, but may be more in some cases.
+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information.
+//
+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms.
+//
+// NOTE: this will panic on NaN, +/-inf
+func NewFromFloat(value float64) Decimal {
+	if value == 0 {
+		return New(0, 0)
+	}
+	return newFromFloat(value, math.Float64bits(value), &float64info)
+}
+
+// NewFromFloat32 converts a float32 to Decimal.
+//
+// The converted number will contain the number of significant digits that can be
+// represented in a float with reliable roundtrip.
+// This is typically 6-8 digits depending on the input.
+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information.
+//
+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms.
+//
+// NOTE: this will panic on NaN, +/-inf
+func NewFromFloat32(value float32) Decimal {
+	if value == 0 {
+		return New(0, 0)
+	}
+	// XOR is workaround for https://github.com/golang/go/issues/26285
+	a := math.Float32bits(value) ^ 0x80808080
+	return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info)
+}
+
+func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal {
+	if math.IsNaN(val) || math.IsInf(val, 0) {
+		panic(fmt.Sprintf("Cannot create a Decimal from %v", val))
+	}
+	exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+	mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+	switch exp {
+	case 0:
+		// denormalized
+		exp++
+
+	default:
+		// add implicit top bit
+		mant |= uint64(1) << flt.mantbits
+	}
+	exp += flt.bias
+
+	var d decimal
+	d.Assign(mant)
+	d.Shift(exp - int(flt.mantbits))
+	d.neg = bits>>(flt.expbits+flt.mantbits) != 0
+
+	roundShortest(&d, mant, exp, flt)
+	// If less than 19 digits, we can do calculation in an int64.
+	if d.nd < 19 {
+		tmp := int64(0)
+		m := int64(1)
+		for i := d.nd - 1; i >= 0; i-- {
+			tmp += m * int64(d.d[i]-'0')
+			m *= 10
+		}
+		if d.neg {
+			tmp *= -1
+		}
+		return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)}
+	}
+	dValue := new(big.Int)
+	dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10)
+	if ok {
+		return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)}
+	}
+
+	return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd))
+}
+
+// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary
+// number of fractional digits.
+// +// Example: +// +// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" +func NewFromFloatWithExponent(value float64, exp int32) Decimal { + if math.IsNaN(value) || math.IsInf(value, 0) { + panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) + } + + bits := math.Float64bits(value) + mant := bits & (1<<52 - 1) + exp2 := int32((bits >> 52) & (1<<11 - 1)) + sign := bits >> 63 + + if exp2 == 0 { + // specials + if mant == 0 { + return Decimal{} + } + // subnormal + exp2++ + } else { + // normal + mant |= 1 << 52 + } + + exp2 -= 1023 + 52 + + // normalizing base-2 values + for mant&1 == 0 { + mant = mant >> 1 + exp2++ + } + + // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 + if exp < 0 && exp < exp2 { + if exp2 < 0 { + exp = exp2 + } else { + exp = 0 + } + } + + // representing 10^M * 2^N as 5^M * 2^(M+N) + exp2 -= exp + + temp := big.NewInt(1) + dMant := big.NewInt(int64(mant)) + + // applying 5^M + if exp > 0 { + temp = temp.SetInt64(int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + } else if exp < 0 { + temp = temp.SetInt64(-int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + dMant = dMant.Mul(dMant, temp) + temp = temp.SetUint64(1) + } + + // applying 2^(M+N) + if exp2 > 0 { + dMant = dMant.Lsh(dMant, uint(exp2)) + } else if exp2 < 0 { + temp = temp.Lsh(temp, uint(-exp2)) + } + + // rounding and downscaling + if exp > 0 || exp2 < 0 { + halfDown := new(big.Int).Rsh(temp, 1) + dMant = dMant.Add(dMant, halfDown) + dMant = dMant.Quo(dMant, temp) + } + + if sign == 1 { + dMant = dMant.Neg(dMant) + } + + return Decimal{ + value: dMant, + exp: exp, + } +} + +// Copy returns a copy of decimal with the same value and exponent, but a different pointer to value. +func (d Decimal) Copy() Decimal { + d.ensureInitialized() + return Decimal{ + value: new(big.Int).Set(d.value), + exp: d.exp, + } +} + +// rescale returns a rescaled version of the decimal. Returned +// decimal may be less precise if the given exponent is bigger +// than the initial exponent of the Decimal. +// NOTE: this will truncate, NOT round +// +// Example: +// +// d := New(12345, -4) +// d2 := d.rescale(-1) +// d3 := d2.rescale(-4) +// println(d1) +// println(d2) +// println(d3) +// +// Output: +// +// 1.2345 +// 1.2 +// 1.2000 +func (d Decimal) rescale(exp int32) Decimal { + d.ensureInitialized() + + if d.exp == exp { + return Decimal{ + new(big.Int).Set(d.value), + d.exp, + } + } + + // NOTE(vadim): must convert exps to float64 before - to prevent overflow + diff := math.Abs(float64(exp) - float64(d.exp)) + value := new(big.Int).Set(d.value) + + expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) + if exp > d.exp { + value = value.Quo(value, expScale) + } else if exp < d.exp { + value = value.Mul(value, expScale) + } + + return Decimal{ + value: value, + exp: exp, + } +} + +// Abs returns the absolute value of the decimal. +func (d Decimal) Abs() Decimal { + if !d.IsNegative() { + return d + } + d.ensureInitialized() + d2Value := new(big.Int).Abs(d.value) + return Decimal{ + value: d2Value, + exp: d.exp, + } +} + +// Add returns d + d2. +func (d Decimal) Add(d2 Decimal) Decimal { + rd, rd2 := RescalePair(d, d2) + + d3Value := new(big.Int).Add(rd.value, rd2.value) + return Decimal{ + value: d3Value, + exp: rd.exp, + } +} + +// Sub returns d - d2. 
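+//
+// Example (illustrative; not part of the upstream docs):
+//
+//	NewFromInt(5).Sub(NewFromInt(3)).String() // output: "2"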
+func (d Decimal) Sub(d2 Decimal) Decimal {
+	rd, rd2 := RescalePair(d, d2)
+
+	d3Value := new(big.Int).Sub(rd.value, rd2.value)
+	return Decimal{
+		value: d3Value,
+		exp:   rd.exp,
+	}
+}
+
+// Neg returns -d.
+func (d Decimal) Neg() Decimal {
+	d.ensureInitialized()
+	val := new(big.Int).Neg(d.value)
+	return Decimal{
+		value: val,
+		exp:   d.exp,
+	}
+}
+
+// Mul returns d * d2.
+func (d Decimal) Mul(d2 Decimal) Decimal {
+	d.ensureInitialized()
+	d2.ensureInitialized()
+
+	expInt64 := int64(d.exp) + int64(d2.exp)
+	if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 {
+		// NOTE(vadim): better to panic than give incorrect results, as
+		// Decimals are usually used for money
+		panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64))
+	}
+
+	d3Value := new(big.Int).Mul(d.value, d2.value)
+	return Decimal{
+		value: d3Value,
+		exp:   int32(expInt64),
+	}
+}
+
+// Shift shifts the decimal in base 10.
+// It shifts left when shift is positive and right if shift is negative.
+// In simpler terms, the given value for shift is added to the exponent
+// of the decimal.
+func (d Decimal) Shift(shift int32) Decimal {
+	d.ensureInitialized()
+	return Decimal{
+		value: new(big.Int).Set(d.value),
+		exp:   d.exp + shift,
+	}
+}
+
+// Div returns d / d2. If it doesn't divide exactly, the result will have
+// DivisionPrecision digits after the decimal point.
+func (d Decimal) Div(d2 Decimal) Decimal {
+	return d.DivRound(d2, int32(DivisionPrecision))
+}
+
+// QuoRem does division with remainder
+// d.QuoRem(d2,precision) returns quotient q and remainder r such that
+//
+//	d = d2 * q + r, q an integer multiple of 10^(-precision)
+//	0 <= r < abs(d2) * 10 ^(-precision) if d>=0
+//	0 >= r > -abs(d2) * 10 ^(-precision) if d<0
+//
+// Note that precision<0 is allowed as input.
+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) {
+	d.ensureInitialized()
+	d2.ensureInitialized()
+	if d2.value.Sign() == 0 {
+		panic("decimal division by 0")
+	}
+	scale := -precision
+	e := int64(d.exp) - int64(d2.exp) - int64(scale)
+	if e > math.MaxInt32 || e < math.MinInt32 {
+		panic("overflow in decimal QuoRem")
+	}
+	var aa, bb, expo big.Int
+	var scalerest int32
+	// d = a 10^ea
+	// d2 = b 10^eb
+	if e < 0 {
+		aa = *d.value
+		expo.SetInt64(-e)
+		bb.Exp(tenInt, &expo, nil)
+		bb.Mul(d2.value, &bb)
+		scalerest = d.exp
+		// now aa = a
+		//     bb = b 10^(scale + eb - ea)
+	} else {
+		expo.SetInt64(e)
+		aa.Exp(tenInt, &expo, nil)
+		aa.Mul(d.value, &aa)
+		bb = *d2.value
+		scalerest = scale + d2.exp
+		// now aa = a 10^(ea - eb - scale)
+		//     bb = b
+	}
+	var q, r big.Int
+	q.QuoRem(&aa, &bb, &r)
+	dq := Decimal{value: &q, exp: scale}
+	dr := Decimal{value: &r, exp: scalerest}
+	return dq, dr
+}
+
+// DivRound divides and rounds to a given precision
+// i.e. to an integer multiple of 10^(-precision)
+//
+// for a positive quotient digit 5 is rounded up, away from 0
+// if the quotient is negative then digit 5 is rounded down, away from 0
+//
+// Note that precision<0 is allowed as input.
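+//
+// Example (illustrative; not part of the upstream docs):
+//
+//	NewFromInt(2).DivRound(NewFromInt(3), 2).String()  // output: "0.67"
+//	NewFromInt(-2).DivRound(NewFromInt(3), 2).String() // output: "-0.67"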
+func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { + // QuoRem already checks initialization + q, r := d.QuoRem(d2, precision) + // the actual rounding decision is based on comparing r*10^precision and d2/2 + // instead compare 2 r 10 ^precision and d2 + var rv2 big.Int + rv2.Abs(r.value) + rv2.Lsh(&rv2, 1) + // now rv2 = abs(r.value) * 2 + r2 := Decimal{value: &rv2, exp: r.exp + precision} + // r2 is now 2 * r * 10 ^ precision + var c = r2.Cmp(d2.Abs()) + + if c < 0 { + return q + } + + if d.value.Sign()*d2.value.Sign() < 0 { + return q.Sub(New(1, -precision)) + } + + return q.Add(New(1, -precision)) +} + +// Mod returns d % d2. +func (d Decimal) Mod(d2 Decimal) Decimal { + _, r := d.QuoRem(d2, 0) + return r +} + +// Pow returns d to the power of d2. +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. +// +// Pow returns 0 (zero-value of Decimal) instead of error for power operation edge cases, to handle those edge cases use PowWithPrecision +// Edge cases not handled by Pow: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1 := d1.Pow(d2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2 := d3.Pow(d4) +// res2.String() // output: "10118.08037125" +func (d Decimal) Pow(d2 Decimal) Decimal { + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{} + } + if expSign == 1 { + return Decimal{zeroInt, 0} + } + if expSign == -1 { + return Decimal{} + } + } + + if expSign == 0 { + return Decimal{oneInt, 0} + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{} + } + + intPartPow, _ := d.PowBigInt(expIntPart.value) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + precision := digitsBase + + if digitsExponent > precision { + precision += digitsExponent + } + + precision += 6 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + // Join integer and fractional part, + // base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res +} + +// PowWithPrecision returns d to the power of d2. +// Precision parameter specifies minimum precision of the result (digits after decimal point). +// Returned decimal is not rounded to 'precision' places after decimal point. 
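+// If a result rounded to exactly 'precision' places is required, call
+// Round(precision) on the returned value.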
+// +// PowWithPrecision returns error when: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1, err := d1.PowWithPrecision(d2, 2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2, err := d3.PowWithPrecision(d4, 5) +// res2.String() // output: "10118.080371595015625" +// +// d5 := decimal.NewFromFloat(-3.0) +// d6 := decimal.NewFromFloat(-6.0) +// res3, err := d5.PowWithPrecision(d6, 10) +// res3.String() // output: "0.0013717421" +func (d Decimal) PowWithPrecision(d2 Decimal, precision int32) (Decimal, error) { + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + if expSign == 1 { + return Decimal{zeroInt, 0}, nil + } + if expSign == -1 { + return Decimal{}, fmt.Errorf("cannot represent infinity value of 0 ** y, where y < 0") + } + } + + if expSign == 0 { + return Decimal{oneInt, 0}, nil + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent imaginary value of x ** y, where x < 0 and y is non-integer decimal") + } + + intPartPow, _ := d.powBigIntWithPrecision(expIntPart.value, precision) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow, nil + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + if int32(digitsBase) > precision { + precision = int32(digitsBase) + } + if int32(digitsExponent) > precision { + precision += int32(digitsExponent) + } + // increase precision by 10 to compensate for errors in further calculations + precision += 10 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(precision) + if err != nil { + return Decimal{}, err + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(precision) + if err != nil { + return Decimal{}, err + } + + // Join integer and fractional part, + // base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res, nil +} + +// PowInt32 returns d to the power of exp, where exp is int32. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. 
+// +// Example: +// +// d1, err := decimal.NewFromFloat(4.0).PowInt32(4) +// d1.String() // output: "256" +// +// d2, err := decimal.NewFromFloat(3.13).PowInt32(5) +// d2.String() // output: "300.4150512793" +func (d Decimal) PowInt32(exp int32) (Decimal, error) { + if d.IsZero() && exp == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + + isExpNeg := exp < 0 + exp = abs(exp) + + n, result := d, New(1, 0) + + for exp > 0 { + if exp%2 == 1 { + result = result.Mul(n) + } + exp /= 2 + + if exp > 0 { + n = n.Mul(n) + } + } + + if isExpNeg { + return New(1, 0).DivRound(result, int32(PowPrecisionNegativeExponent)), nil + } + + return result, nil +} + +// PowBigInt returns d to the power of exp, where exp is big.Int. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. +// +// Example: +// +// d1, err := decimal.NewFromFloat(3.0).PowBigInt(big.NewInt(3)) +// d1.String() // output: "27" +// +// d2, err := decimal.NewFromFloat(629.25).PowBigInt(big.NewInt(5)) +// d2.String() // output: "98654323103449.5673828125" +func (d Decimal) PowBigInt(exp *big.Int) (Decimal, error) { + return d.powBigIntWithPrecision(exp, int32(PowPrecisionNegativeExponent)) +} + +func (d Decimal) powBigIntWithPrecision(exp *big.Int, precision int32) (Decimal, error) { + if d.IsZero() && exp.Sign() == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + + tmpExp := new(big.Int).Set(exp) + isExpNeg := exp.Sign() < 0 + + if isExpNeg { + tmpExp.Abs(tmpExp) + } + + n, result := d, New(1, 0) + + for tmpExp.Sign() > 0 { + if tmpExp.Bit(0) == 1 { + result = result.Mul(n) + } + tmpExp.Rsh(tmpExp, 1) + + if tmpExp.Sign() > 0 { + n = n.Mul(n) + } + } + + if isExpNeg { + return New(1, 0).DivRound(result, precision), nil + } + + return result, nil +} + +// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. +// OverallPrecision argument specifies the overall precision of the result (integer part + decimal part). +// +// ExpHullAbrham is faster than ExpTaylor for small precision values, but it is much slower for large precision values. +// +// Example: +// +// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" +// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" +func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { + // Algorithm based on Variable precision exponential function. + // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. + if d.IsZero() { + return Decimal{oneInt, 0}, nil + } + + currentPrecision := overallPrecision + + // Algorithm does not work if currentPrecision * 23 < |x|. + // Precision is automatically increased in such cases, so the value can be calculated precisely. + // If newly calculated precision is higher than ExpMaxIterations the currentPrecision will not be changed. 
+	f := d.Abs().InexactFloat64()
+	if ncp := f / 23; ncp > float64(currentPrecision) && ncp < float64(ExpMaxIterations) {
+		currentPrecision = uint32(math.Ceil(ncp))
+	}
+
+	// fail if abs(d) beyond an over/underflow threshold
+	overflowThreshold := New(23*int64(currentPrecision), 0)
+	if d.Abs().Cmp(overflowThreshold) > 0 {
+		return Decimal{}, fmt.Errorf("over/underflow threshold, exp(x) cannot be calculated precisely")
+	}
+
+	// Return 1 if abs(d) small enough; this also avoids later over/underflow
+	overflowThreshold2 := New(9, -int32(currentPrecision)-1)
+	if d.Abs().Cmp(overflowThreshold2) <= 0 {
+		return Decimal{oneInt, d.exp}, nil
+	}
+
+	// t is the smallest integer >= 0 such that the corresponding abs(d/k) < 1
+	t := d.exp + int32(d.NumDigits()) // Add d.NumDigits() because the paper assumes that d.value is in [0.1, 1)
+
+	if t < 0 {
+		t = 0
+	}
+
+	k := New(1, t)                                     // reduction factor
+	r := Decimal{new(big.Int).Set(d.value), d.exp - t} // reduced argument
+	p := int32(currentPrecision) + t + 2               // precision for calculating the sum
+
+	// Determine n, the number of terms for calculating sum
+	// use first Newton step (1.453p - 1.182) / log10(p/abs(r))
+	// for solving appropriate equation, along with directed
+	// roundings and simple rational bound for log10(p/abs(r))
+	rf := r.Abs().InexactFloat64()
+	pf := float64(p)
+	nf := math.Ceil((1.453*pf - 1.182) / math.Log10(pf/rf))
+	if nf > float64(ExpMaxIterations) || math.IsNaN(nf) {
+		return Decimal{}, fmt.Errorf("exact value cannot be calculated in <=ExpMaxIterations iterations")
+	}
+	n := int64(nf)
+
+	tmp := New(0, 0)
+	sum := New(1, 0)
+	one := New(1, 0)
+	for i := n - 1; i > 0; i-- {
+		tmp.value.SetInt64(i)
+		sum = sum.Mul(r.DivRound(tmp, p))
+		sum = sum.Add(one)
+	}
+
+	ki := k.IntPart()
+	res := New(1, 0)
+	for i := ki; i > 0; i-- {
+		res = res.Mul(sum)
+	}
+
+	resNumDigits := int32(res.NumDigits())
+
+	var roundDigits int32
+	if resNumDigits > abs(res.exp) {
+		roundDigits = int32(currentPrecision) - resNumDigits - res.exp
+	} else {
+		roundDigits = int32(currentPrecision)
+	}
+
+	res = res.Round(roundDigits)
+
+	return res, nil
+}
+
+// ExpTaylor calculates the natural exponent of decimal (e to the power of d) using Taylor series expansion.
+// Precision argument specifies how precise the result must be (number of digits after decimal point).
+// Negative precision is allowed.
+//
+// ExpTaylor is much faster for large precision values than ExpHullAbrham.
+// +// Example: +// +// d, err := NewFromFloat(26.1).ExpTaylor(2).String() +// d.String() // output: "216314672147.06" +// +// NewFromFloat(26.1).ExpTaylor(20).String() +// d.String() // output: "216314672147.05767284062928674083" +// +// NewFromFloat(26.1).ExpTaylor(-10).String() +// d.String() // output: "220000000000" +func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { + // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only + if d.IsZero() { + return Decimal{oneInt, 0}.Round(precision), nil + } + + var epsilon Decimal + var divPrecision int32 + if precision < 0 { + epsilon = New(1, -1) + divPrecision = 8 + } else { + epsilon = New(1, -precision-1) + divPrecision = precision + 1 + } + + decAbs := d.Abs() + pow := d.Abs() + factorial := New(1, 0) + + result := New(1, 0) + + for i := int64(1); ; { + step := pow.DivRound(factorial, divPrecision) + result = result.Add(step) + + // Stop Taylor series when current step is smaller than epsilon + if step.Cmp(epsilon) < 0 { + break + } + + pow = pow.Mul(decAbs) + + i++ + + // Calculate next factorial number or retrieve cached value + if len(factorials) >= int(i) && !factorials[i-1].IsZero() { + factorial = factorials[i-1] + } else { + // To avoid any race conditions, firstly the zero value is appended to a slice to create + // a spot for newly calculated factorial. After that, the zero value is replaced by calculated + // factorial using the index notation. + factorial = factorials[i-2].Mul(New(i, 0)) + factorials = append(factorials, Zero) + factorials[i-1] = factorial + } + } + + if d.Sign() < 0 { + result = New(1, 0).DivRound(result, precision+1) + } + + result = result.Round(precision) + return result, nil +} + +// Ln calculates natural logarithm of d. +// Precision argument specifies how precise the result must be (number of digits after decimal point). +// Negative precision is allowed. +// +// Example: +// +// d1, err := NewFromFloat(13.3).Ln(2) +// d1.String() // output: "2.59" +// +// d2, err := NewFromFloat(579.161).Ln(10) +// d2.String() // output: "6.3615805046" +func (d Decimal) Ln(precision int32) (Decimal, error) { + // Algorithm based on The Use of Iteration Methods for Approximating the Natural Logarithm, + // James F. Epperson, The American Mathematical Monthly, Vol. 96, No. 9, November 1989, pp. 831-835. 
+ if d.IsNegative() { + return Decimal{}, fmt.Errorf("cannot calculate natural logarithm for negative decimals") + } + + if d.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent natural logarithm of 0, result: -infinity") + } + + calcPrecision := precision + 2 + z := d.Copy() + + var comp1, comp3, comp2, comp4, reduceAdjust Decimal + comp1 = z.Sub(Decimal{oneInt, 0}) + comp3 = Decimal{oneInt, -1} + + // for decimal in range [0.9, 1.1] where ln(d) is close to 0 + usePowerSeries := false + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // reduce input decimal to range [0.1, 1) + expDelta := int32(z.NumDigits()) + z.exp + z.exp -= expDelta + + // Input decimal was reduced by factor of 10^expDelta, thus we will need to add + // ln(10^expDelta) = expDelta * ln(10) + // to the result to compensate that + ln10 := ln10.withPrecision(calcPrecision) + reduceAdjust = NewFromInt32(expDelta) + reduceAdjust = reduceAdjust.Mul(ln10) + + comp1 = z.Sub(Decimal{oneInt, 0}) + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // initial estimate using floats + zFloat := z.InexactFloat64() + comp1 = NewFromFloat(math.Log(zFloat)) + } + } + + epsilon := Decimal{oneInt, -calcPrecision} + + if usePowerSeries { + // Power Series - https://en.wikipedia.org/wiki/Logarithm#Power_series + // Calculating n-th term of formula: ln(z+1) = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + // until the difference between current and next term is smaller than epsilon. + // Coverage quite fast for decimals close to 1.0 + + // z + 2 + comp2 = comp1.Add(Decimal{twoInt, 0}) + // z / (z + 2) + comp3 = comp1.DivRound(comp2, calcPrecision) + // 2 * (z / (z + 2)) + comp1 = comp3.Add(comp3) + comp2 = comp1.Copy() + + for n := 1; ; n++ { + // 2 * (z / (z+2))^(2n+1) + comp2 = comp2.Mul(comp3).Mul(comp3) + + // 1 / (2n+1) * 2 * (z / (z+2))^(2n+1) + comp4 = NewFromInt(int64(2*n + 1)) + comp4 = comp2.DivRound(comp4, calcPrecision) + + // comp1 = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + comp1 = comp1.Add(comp4) + + if comp4.Abs().Cmp(epsilon) <= 0 { + break + } + } + } else { + // Halley's Iteration. 
+ // Calculating n-th term of formula: a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z), + // until the difference between current and next term is smaller than epsilon + var prevStep Decimal + maxIters := calcPrecision*2 + 10 + + for i := int32(0); i < maxIters; i++ { + // exp(a_n) + comp3, _ = comp1.ExpTaylor(calcPrecision) + // exp(a_n) - z + comp2 = comp3.Sub(z) + // 2 * (exp(a_n) - z) + comp2 = comp2.Add(comp2) + // exp(a_n) + z + comp4 = comp3.Add(z) + // 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp3 = comp2.DivRound(comp4, calcPrecision) + // comp1 = a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp1 = comp1.Sub(comp3) + + if prevStep.Add(comp3).IsZero() { + // If iteration steps oscillate we should return early and prevent an infinity loop + // NOTE(mwoss): This should be quite a rare case, returning error is not necessary + break + } + + if comp3.Abs().Cmp(epsilon) <= 0 { + break + } + + prevStep = comp3 + } + } + + comp1 = comp1.Add(reduceAdjust) + + return comp1.Round(precision), nil +} + +// NumDigits returns the number of digits of the decimal coefficient (d.Value) +func (d Decimal) NumDigits() int { + if d.value == nil { + return 1 + } + + if d.value.IsInt64() { + i64 := d.value.Int64() + // restrict fast path to integers with exact conversion to float64 + if i64 <= (1<<53) && i64 >= -(1<<53) { + if i64 == 0 { + return 1 + } + return int(math.Log10(math.Abs(float64(i64)))) + 1 + } + } + + estimatedNumDigits := int(float64(d.value.BitLen()) / math.Log2(10)) + + // estimatedNumDigits (lg10) may be off by 1, need to verify + digitsBigInt := big.NewInt(int64(estimatedNumDigits)) + errorCorrectionUnit := digitsBigInt.Exp(tenInt, digitsBigInt, nil) + + if d.value.CmpAbs(errorCorrectionUnit) >= 0 { + return estimatedNumDigits + 1 + } + + return estimatedNumDigits +} + +// IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false. +func (d Decimal) IsInteger() bool { + // The most typical case, all decimal with exponent higher or equal 0 can be represented as integer + if d.exp >= 0 { + return true + } + // When the exponent is negative we have to check every number after the decimal place + // If all of them are zeroes, we are sure that given decimal can be represented as an integer + var r big.Int + q := new(big.Int).Set(d.value) + for z := abs(d.exp); z > 0; z-- { + q.QuoRem(q, tenInt, &r) + if r.Cmp(zeroInt) != 0 { + return false + } + } + return true +} + +// Abs calculates absolute value of any int32. Used for calculating absolute value of decimal's exponent. +func abs(n int32) int32 { + if n < 0 { + return -n + } + return n +} + +// Cmp compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +func (d Decimal) Cmp(d2 Decimal) int { + d.ensureInitialized() + d2.ensureInitialized() + + if d.exp == d2.exp { + return d.value.Cmp(d2.value) + } + + rd, rd2 := RescalePair(d, d2) + + return rd.value.Cmp(rd2.value) +} + +// Compare compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +func (d Decimal) Compare(d2 Decimal) int { + return d.Cmp(d2) +} + +// Equal returns whether the numbers represented by d and d2 are equal. +func (d Decimal) Equal(d2 Decimal) bool { + return d.Cmp(d2) == 0 +} + +// Deprecated: Equals is deprecated, please use Equal method instead. +func (d Decimal) Equals(d2 Decimal) bool { + return d.Equal(d2) +} + +// GreaterThan (GT) returns true when d is greater than d2. 
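+//
+// Example (illustrative; not part of the upstream docs):
+//
+//	NewFromFloat(1.1).GreaterThan(NewFromInt(1)) // true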
+func (d Decimal) GreaterThan(d2 Decimal) bool { + return d.Cmp(d2) == 1 +} + +// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. +func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { + cmp := d.Cmp(d2) + return cmp == 1 || cmp == 0 +} + +// LessThan (LT) returns true when d is less than d2. +func (d Decimal) LessThan(d2 Decimal) bool { + return d.Cmp(d2) == -1 +} + +// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. +func (d Decimal) LessThanOrEqual(d2 Decimal) bool { + cmp := d.Cmp(d2) + return cmp == -1 || cmp == 0 +} + +// Sign returns: +// +// -1 if d < 0 +// 0 if d == 0 +// +1 if d > 0 +func (d Decimal) Sign() int { + if d.value == nil { + return 0 + } + return d.value.Sign() +} + +// IsPositive return +// +// true if d > 0 +// false if d == 0 +// false if d < 0 +func (d Decimal) IsPositive() bool { + return d.Sign() == 1 +} + +// IsNegative return +// +// true if d < 0 +// false if d == 0 +// false if d > 0 +func (d Decimal) IsNegative() bool { + return d.Sign() == -1 +} + +// IsZero return +// +// true if d == 0 +// false if d > 0 +// false if d < 0 +func (d Decimal) IsZero() bool { + return d.Sign() == 0 +} + +// Exponent returns the exponent, or scale component of the decimal. +func (d Decimal) Exponent() int32 { + return d.exp +} + +// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() +func (d Decimal) Coefficient() *big.Int { + d.ensureInitialized() + // we copy the coefficient so that mutating the result does not mutate the Decimal. + return new(big.Int).Set(d.value) +} + +// CoefficientInt64 returns the coefficient of the decimal as int64. It is scaled by 10^Exponent() +// If coefficient cannot be represented in an int64, the result will be undefined. +func (d Decimal) CoefficientInt64() int64 { + d.ensureInitialized() + return d.value.Int64() +} + +// IntPart returns the integer component of the decimal. +func (d Decimal) IntPart() int64 { + scaledD := d.rescale(0) + return scaledD.value.Int64() +} + +// BigInt returns integer component of the decimal as a BigInt. +func (d Decimal) BigInt() *big.Int { + scaledD := d.rescale(0) + return scaledD.value +} + +// BigFloat returns decimal as BigFloat. +// Be aware that casting decimal to BigFloat might cause a loss of precision. +func (d Decimal) BigFloat() *big.Float { + f := &big.Float{} + f.SetString(d.String()) + return f +} + +// Rat returns a rational number representation of the decimal. +func (d Decimal) Rat() *big.Rat { + d.ensureInitialized() + if d.exp <= 0 { + // NOTE(vadim): must negate after casting to prevent int32 overflow + denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) + return new(big.Rat).SetFrac(d.value, denom) + } + + mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) + num := new(big.Int).Mul(d.value, mul) + return new(big.Rat).SetFrac(num, oneInt) +} + +// Float64 returns the nearest float64 value for d and a bool indicating +// whether f represents d exactly. +// For more details, see the documentation for big.Rat.Float64 +func (d Decimal) Float64() (f float64, exact bool) { + return d.Rat().Float64() +} + +// InexactFloat64 returns the nearest float64 value for d. +// It doesn't indicate if the returned value represents d exactly. +func (d Decimal) InexactFloat64() float64 { + f, _ := d.Float64() + return f +} + +// String returns the string representation of the decimal +// with the fixed point. 
+// +// Example: +// +// d := New(-12345, -3) +// println(d.String()) +// +// Output: +// +// -12.345 +func (d Decimal) String() string { + return d.string(true) +} + +// StringFixed returns a rounded fixed-point string with places digits after +// the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" +func (d Decimal) StringFixed(places int32) string { + rounded := d.Round(places) + return rounded.string(false) +} + +// StringFixedBank returns a banker rounded fixed-point string with places digits +// after the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixedBank(2) // output: "0.00" +// NewFromFloat(0).StringFixedBank(0) // output: "0" +// NewFromFloat(5.45).StringFixedBank(0) // output: "5" +// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" +// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" +// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" +// NewFromFloat(545).StringFixedBank(-1) // output: "540" +func (d Decimal) StringFixedBank(places int32) string { + rounded := d.RoundBank(places) + return rounded.string(false) +} + +// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For +// more details see the documentation at function RoundCash. +func (d Decimal) StringFixedCash(interval uint8) string { + rounded := d.RoundCash(interval) + return rounded.string(false) +} + +// Round rounds the decimal to places decimal places. +// If places < 0, it will round the integer part to the nearest 10^(-places). +// +// Example: +// +// NewFromFloat(5.45).Round(1).String() // output: "5.5" +// NewFromFloat(545).Round(-1).String() // output: "550" +func (d Decimal) Round(places int32) Decimal { + if d.exp == -places { + return d + } + // truncate to places + 1 + ret := d.rescale(-places - 1) + + // add sign(d) * 0.5 + if ret.value.Sign() < 0 { + ret.value.Sub(ret.value, fiveInt) + } else { + ret.value.Add(ret.value, fiveInt) + } + + // floor for positive numbers, ceil for negative numbers + _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) + ret.exp++ + if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { + ret.value.Add(ret.value, oneInt) + } + + return ret +} + +// RoundCeil rounds the decimal towards +infinity. +// +// Example: +// +// NewFromFloat(545).RoundCeil(-2).String() // output: "600" +// NewFromFloat(500).RoundCeil(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.4" +func (d Decimal) RoundCeil(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() > 0 { + rescaled.value.Add(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundFloor rounds the decimal towards -infinity. 
+// +// Example: +// +// NewFromFloat(545).RoundFloor(-2).String() // output: "500" +// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.5" +func (d Decimal) RoundFloor(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() < 0 { + rescaled.value.Sub(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundUp rounds the decimal away from zero. +// +// Example: +// +// NewFromFloat(545).RoundUp(-2).String() // output: "600" +// NewFromFloat(500).RoundUp(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.5" +func (d Decimal) RoundUp(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() > 0 { + rescaled.value.Add(rescaled.value, oneInt) + } else if d.value.Sign() < 0 { + rescaled.value.Sub(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundDown rounds the decimal towards zero. +// +// Example: +// +// NewFromFloat(545).RoundDown(-2).String() // output: "500" +// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.4" +func (d Decimal) RoundDown(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + return rescaled +} + +// RoundBank rounds the decimal to places decimal places. +// If the final digit to round is equidistant from the nearest two integers the +// rounded value is taken as the even number +// +// If places < 0, it will round the integer part to the nearest 10^(-places). +// +// Examples: +// +// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" +// NewFromFloat(545).RoundBank(-1).String() // output: "540" +// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" +// NewFromFloat(546).RoundBank(-1).String() // output: "550" +// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" +// NewFromFloat(555).RoundBank(-1).String() // output: "560" +func (d Decimal) RoundBank(places int32) Decimal { + + round := d.Round(places) + remainder := d.Sub(round).Abs() + + half := New(5, -places-1) + if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { + if round.value.Sign() < 0 { + round.value.Add(round.value, oneInt) + } else { + round.value.Sub(round.value, oneInt) + } + } + + return round +} + +// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific +// interval. The amount payable for a cash transaction is rounded to the nearest +// multiple of the minimum currency unit available. The following intervals are +// available: 5, 10, 25, 50 and 100; any other number throws a panic. 
+// +// 5: 5 cent rounding 3.43 => 3.45 +// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) +// 25: 25 cent rounding 3.41 => 3.50 +// 50: 50 cent rounding 3.75 => 4.00 +// 100: 100 cent rounding 3.50 => 4.00 +// +// For more details: https://en.wikipedia.org/wiki/Cash_rounding +func (d Decimal) RoundCash(interval uint8) Decimal { + var iVal *big.Int + switch interval { + case 5: + iVal = twentyInt + case 10: + iVal = tenInt + case 25: + iVal = fourInt + case 50: + iVal = twoInt + case 100: + iVal = oneInt + default: + panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100", interval)) + } + dVal := Decimal{ + value: iVal, + } + + // TODO: optimize those calculations to reduce the high allocations (~29 allocs). + return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) +} + +// Floor returns the nearest integer value less than or equal to d. +func (d Decimal) Floor() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z := new(big.Int).Div(d.value, exp) + return Decimal{value: z, exp: 0} +} + +// Ceil returns the nearest integer value greater than or equal to d. +func (d Decimal) Ceil() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) + if m.Cmp(zeroInt) != 0 { + z.Add(z, oneInt) + } + return Decimal{value: z, exp: 0} +} + +// Truncate truncates off digits from the number, without rounding. +// +// NOTE: precision is the last digit that will not be truncated (must be >= 0). +// +// Example: +// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" +func (d Decimal) Truncate(precision int32) Decimal { + d.ensureInitialized() + if precision >= 0 && -precision > d.exp { + return d.rescale(-precision) + } + return d +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + return nil + } + + str, err := unquoteIfQuoted(decimalBytes) + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) + } + + decimal, err := NewFromString(str) + *d = decimal + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Decimal) MarshalJSON() ([]byte, error) { + var str string + if MarshalJSONWithoutQuotes { + str = d.String() + } else { + str = "\"" + d.String() + "\"" + } + return []byte(str), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation +// is already used when encoding to text, this method stores that string as []byte +func (d *Decimal) UnmarshalBinary(data []byte) error { + // Verify we have at least 4 bytes for the exponent. The GOB encoded value + // may be empty. 
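+	// Layout (mirroring MarshalBinary below): 4 bytes of big-endian
+	// exponent, followed by the gob encoding of the coefficient.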
+ if len(data) < 4 { + return fmt.Errorf("error decoding binary %v: expected at least 4 bytes, got %d", data, len(data)) + } + + // Extract the exponent + d.exp = int32(binary.BigEndian.Uint32(data[:4])) + + // Extract the value + d.value = new(big.Int) + if err := d.value.GobDecode(data[4:]); err != nil { + return fmt.Errorf("error decoding binary %v: %s", data, err) + } + + return nil +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Decimal) MarshalBinary() (data []byte, err error) { + // exp is written first, but encode value first to know output size + var valueData []byte + if valueData, err = d.value.GobEncode(); err != nil { + return nil, err + } + + // Write the exponent in front, since it's a fixed size + expData := make([]byte, 4, len(valueData)+4) + binary.BigEndian.PutUint32(expData, uint32(d.exp)) + + // Return the byte array + return append(expData, valueData...), nil +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *Decimal) Scan(value interface{}) error { + // first try to see if the data is stored in database as a Numeric datatype + switch v := value.(type) { + + case float32: + *d = NewFromFloat(float64(v)) + return nil + + case float64: + // numeric in sqlite3 sends us float64 + *d = NewFromFloat(v) + return nil + + case int64: + // at least in sqlite3 when the value is 0 in db, the data is sent + // to us as an int64 instead of a float64 ... + *d = New(v, 0) + return nil + + case uint64: + // while clickhouse may send 0 in db as uint64 + *d = NewFromUint64(v) + return nil + + default: + // default is trying to interpret value stored as string + str, err := unquoteIfQuoted(v) + if err != nil { + return err + } + *d, err = NewFromString(str) + return err + } +} + +// Value implements the driver.Valuer interface for database serialization. +func (d Decimal) Value() (driver.Value, error) { + return d.String(), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for XML +// deserialization. +func (d *Decimal) UnmarshalText(text []byte) error { + str := string(text) + + dec, err := NewFromString(str) + *d = dec + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface for XML +// serialization. +func (d Decimal) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +// GobEncode implements the gob.GobEncoder interface for gob serialization. +func (d Decimal) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface for gob serialization. +func (d *Decimal) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// StringScaled first scales the decimal then calls .String() on it. +// +// Deprecated: buggy and unintuitive. Use StringFixed instead. +func (d Decimal) StringScaled(exp int32) string { + return d.rescale(exp).String() +} + +func (d Decimal) string(trimTrailingZeros bool) string { + if d.exp >= 0 { + return d.rescale(0).value.String() + } + + abs := new(big.Int).Abs(d.value) + str := abs.String() + + var intPart, fractionalPart string + + // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN + // and you are on a 32-bit machine. Won't fix this super-edge case. 
+ dExpInt := int(d.exp) + if len(str) > -dExpInt { + intPart = str[:len(str)+dExpInt] + fractionalPart = str[len(str)+dExpInt:] + } else { + intPart = "0" + + num0s := -dExpInt - len(str) + fractionalPart = strings.Repeat("0", num0s) + str + } + + if trimTrailingZeros { + i := len(fractionalPart) - 1 + for ; i >= 0; i-- { + if fractionalPart[i] != '0' { + break + } + } + fractionalPart = fractionalPart[:i+1] + } + + number := intPart + if len(fractionalPart) > 0 { + number += "." + fractionalPart + } + + if d.value.Sign() < 0 { + return "-" + number + } + + return number +} + +func (d *Decimal) ensureInitialized() { + if d.value == nil { + d.value = new(big.Int) + } +} + +// Min returns the smallest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Min(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Min with 0 arguments. +func Min(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) < 0 { + ans = item + } + } + return ans +} + +// Max returns the largest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Max(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Max with 0 arguments. +func Max(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) > 0 { + ans = item + } + } + return ans +} + +// Sum returns the combined total of the provided first and rest Decimals +func Sum(first Decimal, rest ...Decimal) Decimal { + total := first + for _, item := range rest { + total = total.Add(item) + } + + return total +} + +// Avg returns the average value of the provided first and rest Decimals +func Avg(first Decimal, rest ...Decimal) Decimal { + count := New(int64(len(rest)+1), 0) + sum := Sum(first, rest...) + return sum.Div(count) +} + +// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) +func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { + d1.ensureInitialized() + d2.ensureInitialized() + + if d1.exp < d2.exp { + return d1, d2.rescale(d1.exp) + } else if d1.exp > d2.exp { + return d1.rescale(d2.exp), d2 + } + + return d1, d2 +} + +func unquoteIfQuoted(value interface{}) (string, error) { + var bytes []byte + + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", value, value) + } + + // If the amount is quoted, strip the quotes + if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { + bytes = bytes[1 : len(bytes)-1] + } + return string(bytes), nil +} + +// NullDecimal represents a nullable decimal with compatibility for +// scanning null values from the database. +type NullDecimal struct { + Decimal Decimal + Valid bool +} + +func NewNullDecimal(d Decimal) NullDecimal { + return NullDecimal{ + Decimal: d, + Valid: true, + } +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *NullDecimal) Scan(value interface{}) error { + if value == nil { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.Scan(value) +} + +// Value implements the driver.Valuer interface for database serialization. +func (d NullDecimal) Value() (driver.Value, error) { + if !d.Valid { + return nil, nil + } + return d.Decimal.Value() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
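+// A JSON null decodes to an invalid (Valid == false) NullDecimal rather than
+// producing an error.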
+func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error {
+	if string(decimalBytes) == "null" {
+		d.Valid = false
+		return nil
+	}
+	d.Valid = true
+	return d.Decimal.UnmarshalJSON(decimalBytes)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d NullDecimal) MarshalJSON() ([]byte, error) {
+	if !d.Valid {
+		return []byte("null"), nil
+	}
+	return d.Decimal.MarshalJSON()
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for XML
+// deserialization
+func (d *NullDecimal) UnmarshalText(text []byte) error {
+	str := string(text)
+
+	// check for empty XML or XML without body e.g., <tag></tag>
+	if str == "" {
+		d.Valid = false
+		return nil
+	}
+	if err := d.Decimal.UnmarshalText(text); err != nil {
+		d.Valid = false
+		return err
+	}
+	d.Valid = true
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for XML
+// serialization.
+func (d NullDecimal) MarshalText() (text []byte, err error) {
+	if !d.Valid {
+		return []byte{}, nil
+	}
+	return d.Decimal.MarshalText()
+}
+
+// Trig functions
+
+// Atan returns the arctangent, in radians, of x.
+func (d Decimal) Atan() Decimal {
+	if d.Equal(NewFromFloat(0.0)) {
+		return d
+	}
+	if d.GreaterThan(NewFromFloat(0.0)) {
+		return d.satan()
+	}
+	return d.Neg().satan().Neg()
+}
+
+func (d Decimal) xatan() Decimal {
+	P0 := NewFromFloat(-8.750608600031904122785e-01)
+	P1 := NewFromFloat(-1.615753718733365076637e+01)
+	P2 := NewFromFloat(-7.500855792314704667340e+01)
+	P3 := NewFromFloat(-1.228866684490136173410e+02)
+	P4 := NewFromFloat(-6.485021904942025371773e+01)
+	Q0 := NewFromFloat(2.485846490142306297962e+01)
+	Q1 := NewFromFloat(1.650270098316988542046e+02)
+	Q2 := NewFromFloat(4.328810604912902668951e+02)
+	Q3 := NewFromFloat(4.853903996359136964868e+02)
+	Q4 := NewFromFloat(1.945506571482613964425e+02)
+	z := d.Mul(d)
+	b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z)
+	b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4)
+	z = b1.Div(b2)
+	z = d.Mul(z).Add(d)
+	return z
+}
+
+// satan reduces its argument (known to be positive)
+// to the range [0, 0.66] and calls xatan.
+func (d Decimal) satan() Decimal {
+	Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits
+	Tan3pio8 := NewFromFloat(2.41421356237309504880)      // tan(3*pi/8)
+	pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459)
+
+	if d.LessThanOrEqual(NewFromFloat(0.66)) {
+		return d.xatan()
+	}
+	if d.GreaterThan(Tan3pio8) {
+		return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits)
+	}
+	return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits))
+}
+
+// sin coefficients
+var _sin = [...]Decimal{
+	NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd
+	NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d
+	NewFromFloat(2.75573136213857245213e-6),  // 0x3ec71de3567d48a1
+	NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03
+	NewFromFloat(8.33333333332211858878e-3),  // 0x3f8111111110f7d0
+	NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548
+}
+
+// Sin returns the sine of the radian argument x.
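+//
+// Example (illustrative; the polynomial approximation is only accurate to
+// roughly float64 precision):
+//
+//	NewFromFloat(0).Sin().String()  // output: "0"
+//	NewFromFloat(math.Pi / 2).Sin() // ~1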
+func (d Decimal) Sin() Decimal { + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } else { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } + if sign { + y = y.Neg() + } + return y +} + +// cos coefficients +var _cos = [...]Decimal{ + NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b + NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 + NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 + NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 + NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 + NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b +} + +// Cos returns the cosine of the radian argument x. 
+func (d Decimal) Cos() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + // make argument positive + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + if j > 1 { + sign = !sign + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } else { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } + if sign { + y = y.Neg() + } + return y +} + +var _tanP = [...]Decimal{ + NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 + NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd + NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 +} +var _tanQ = [...]Decimal{ + NewFromFloat(1.00000000000000000000e+0), + NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 + NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 + NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef + NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 +} + +// Tan returns the tangent of the radian argument x. 
+func (d Decimal) Tan() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if zz.GreaterThan(NewFromFloat(1e-14)) { + w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) + x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) + y = z.Add(z.Mul(w.Div(x))) + } else { + y = z + } + if j&2 == 2 { + y = NewFromFloat(-1.0).Div(y) + } + if sign { + y = y.Neg() + } + return y +} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go new file mode 100644 index 000000000000..d4b0cd00795c --- /dev/null +++ b/vendor/github.com/shopspring/decimal/rounding.go @@ -0,0 +1,160 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely reconstructed. +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. 
+	// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+	upper := new(decimal)
+	upper.Assign(mant*2 + 1)
+	upper.Shift(exp - int(flt.mantbits) - 1)
+
+	// d = mant << (exp - mantbits)
+	// Next lowest floating point number is mant-1 << exp-mantbits,
+	// unless mant-1 drops the significant bit and exp is not the minimum exp,
+	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
+	// Either way, call it mantlo << explo-mantbits.
+	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+	var mantlo uint64
+	var explo int
+	if mant > 1<<flt.mantbits || exp == minexp {
+		mantlo = mant - 1
+		explo = exp
+	} else {
+		mantlo = mant*2 - 1
+		explo = exp - 1
+	}
+	lower := new(decimal)
+	lower.Assign(mantlo*2 + 1)
+	lower.Shift(explo - int(flt.mantbits) - 1)
+
+	// The upper and lower bounds are possible outputs only if
+	// the original mantissa is even, so that IEEE round-to-even
+	// would round to the original mantissa and not the neighbors.
+	inclusive := mant%2 == 0
+
+	// As we walk the digits we want to know whether rounding up would fall
+	// within the upper bound. This is tracked by upperdelta:
+	//
+	// If upperdelta == 0, the digits of d and upper are the same so far.
+	//
+	// If upperdelta == 1, we saw a difference of 1 between d and upper on a
+	// previous digit and subsequently only 9s for d and 0s for upper,
+	// so rounding up is still within reach of the upper bound.
+	//
+	// If upperdelta == 2, then the difference is greater than 1 and rounding
+	// up is not possible.
+	var upperdelta uint8
+
+	// Now we can figure out the minimum number of digits required.
+	// Walk along until d has distinguished itself from upper and lower.
+	for ui := 0; ; ui++ {
+		// lower, d, and upper may have the decimal points at different
+		// places. In this case upper is the longest, so we iterate from
+		// ui==0 and start li and mi at (possibly) -1.
+		mi := ui - upper.dp + d.dp
+		if mi >= d.nd {
+			break
+		}
+		li := ui - upper.dp + lower.dp
+		l := byte('0') // lower digit
+		if li >= 0 && li < lower.nd {
+			l = lower.d[li]
+		}
+		m := byte('0') // middle digit
+		if mi >= 0 {
+			m = d.d[mi]
+		}
+		u := byte('0') // upper digit
+		if ui < upper.nd {
+			u = upper.d[ui]
+		}
+
+		// Okay to round down (truncate) if lower has a different digit
+		// or if lower is inclusive and is exactly the result of rounding
+		// down (i.e., and we have reached the final digit of lower).
+		okdown := l != m || inclusive && li+1 == lower.nd
+
+		switch {
+		case upperdelta == 0 && m+1 < u:
+			// Example:
+			// m = 12345xxx
+			// u = 12347xxx
+			upperdelta = 2
+		case upperdelta == 0 && m != u:
+			// Example:
+			// m = 12345xxx
+			// u = 12346xxx
+			upperdelta = 1
+		case upperdelta == 1 && (m != '9' || u != '0'):
+			// Example:
+			// m = 1234598x
+			// u = 1234600x
+			upperdelta = 2
+		}
+		// Okay to round up if upper has a different digit and either upper
+		// is inclusive or upper is bigger than the result of rounding up.
+		okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd)
+
+		// If it's okay to do either, then round to the nearest one.
+		// If it's okay to do only one, do it.
+		switch {
+		case okdown && okup:
+			d.Round(mi + 1)
+			return
+		case okdown:
+			d.RoundDown(mi + 1)
+			return
+		case okup:
+			d.RoundUp(mi + 1)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
index 58141f02f109..0e9e1459358f 100644
--- a/vendor/github.com/spf13/cast/README.md
+++ b/vendor/github.com/spf13/cast/README.md
@@ -1,9 +1,9 @@
 # cast
 
-[![Build Status](https://github.com/spf13/cast/actions/workflows/ci.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/ci.yml)
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml)
 [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast)
 ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square)
-[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast)
 
 Easy and safe casting from one type to another in Go
 
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index d49bbf83ecb1..cd9c04885acc 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -18,6 +18,14 @@ import (
 var
errNegativeNotAllowed = errors.New("unable to cast negative value") +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + // ToTimeE casts an interface to a time.Time type. func ToTimeE(i interface{}) (tim time.Time, err error) { return ToTimeInDefaultLocationE(i, time.UTC) @@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return - case json.Number: + case float64EProvider: var v float64 v, err = s.Float64() d = time.Duration(v) return + case float64Provider: + d = time.Duration(s.Float64()) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -174,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case float64Provider: + return s.Float64(), nil case bool: if s { return 1, nil @@ -230,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case float64Provider: + return float32(s.Float64()), nil case bool: if s { return 1, nil @@ -917,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} { return nil } - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + errorType := reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() v := reflect.ValueOf(a) for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { @@ -987,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an interface to a map[string]string type. func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} + m := map[string]string{} switch v := i.(type) { case map[string]string: @@ -1017,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an interface to a map[string][]string type. func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} + m := map[string][]string{} switch v := i.(type) { case map[string][]string: @@ -1081,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an interface to a map[string]bool type. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} + m := map[string]bool{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1106,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an interface to a map[string]interface{} type. 
func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} + m := map[string]interface{}{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1126,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) { // ToStringMapIntE casts an interface to a map[string]int{} type. func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} + m := map[string]int{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) } @@ -1167,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) { // ToStringMapInt64E casts an interface to a map[string]int64{} type. func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} + m := map[string]int64{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) } @@ -1404,38 +1419,35 @@ func (f timeFormat) hasTimezone() bool { return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone } -var ( - timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) +var timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, +} func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { if d, e = time.Parse(format.format, s); e == nil { diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000000..55ede8a42cc9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
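
A note on the shopspring/decimal additions earlier in this diff: Sin, Cos and Tan all share the same Cody-Waite style reduction, subtracting y times Pi/4 in three progressively smaller pieces (PI4A, PI4B, PI4C) so the reduced argument z keeps extended precision, then evaluating a sine or cosine polynomial chosen by the octant j. A minimal usage sketch of the new methods (the input value is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	// Pi/6: small enough that the reduction lands in the first
	// octant (j == 0), so the sine polynomial branch is taken.
	x := decimal.NewFromFloat(0.5235987755982988)

	fmt.Println(x.Sin()) // expected near 0.5
	fmt.Println(x.Cos()) // expected near 0.8660254...
	fmt.Println(x.Tan()) // expected near 0.5773502...
}
```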
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
new file mode 100644
index 000000000000..a4f5f1458ffb
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -0,0 +1,41 @@
+# gojsonpointer
+An implementation of JSON Pointer - Go language
+
+## Usage
+    jsonText := `{
+        "name": "Bobby B",
+        "occupation": {
+            "title" : "King",
+            "years" : 15,
+            "heir" : "Joffrey B"
+        }
+    }`
+
+    var jsonDocument map[string]interface{}
+    json.Unmarshal([]byte(jsonText), &jsonDocument)
+
+    //create a JSON pointer
+    pointerString := "/occupation/title"
+    pointer, _ := NewJsonPointer(pointerString)
+
+    //SET a new value for the "title" in the document
+    pointer.Set(jsonDocument, "Supreme Leader of Westeros")
+
+    //GET the new "title" from the document
+    title, _, _ := pointer.Get(jsonDocument)
+    fmt.Println(title) //outputs "Supreme Leader of Westeros"
+
+    //DELETE the "heir" from the document
+    deletePointer, _ := NewJsonPointer("/occupation/heir")
+    deletePointer.Delete(jsonDocument)
+
+    b, _ := json.Marshal(jsonDocument)
+    fmt.Println(string(b))
+    //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
+
+
+## References
+https://tools.ietf.org/html/rfc6901
+
+### Note
+Section 4 (Evaluation) of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
new file mode 100644
index 000000000000..798c1f1c57f9
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
@@ -0,0 +1,211 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonpointer
+// repository-desc  An implementation of JSON Pointer - Go language
+//
+// description      Main and unique file.
+// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Uses the pointer to delete a value from a JSON document +func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { + is := &implStruct{mode: "DEL", inDocument: document} + p.implementation(is) + return document, is.outError +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + previousNodes := make([]interface{}, len(p.referenceTokens)) + previousTokens := make([]string, len(p.referenceTokens)) + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + previousNodes[ti] = node + previousTokens[ti] = token + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else if isLastToken && i.mode == "DEL" { + delete(v, decodedToken) + } + } else if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } else if isLastToken && i.mode == "DEL" { + 
v[tokenIndex] = v[len(v)-1] + v[len(v)-1] = nil + v = v[:len(v)-1] + previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000000..55ede8a42cc9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
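
The pointer.go implementation vendored above walks one reference token at a time, decoding the RFC 6901 escapes before each map lookup: `~1` becomes `/` and `~0` becomes `~`. A short sketch of how that shows up in practice (the document and keys are invented; the API is the one added in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	// Keys containing "/" or "~" must be escaped in the pointer string:
	// "~1" decodes to "/" and "~0" decodes to "~" (RFC 6901).
	var doc map[string]interface{}
	_ = json.Unmarshal([]byte(`{"a/b": {"m~n": 42}}`), &doc)

	ptr, err := gojsonpointer.NewJsonPointer("/a~1b/m~0n")
	if err != nil {
		panic(err)
	}

	// Get returns the resolved node, its reflect.Kind, and any error.
	value, kind, err := ptr.Get(doc)
	fmt.Println(value, kind, err) // 42 float64 <nil>
}
```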
diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 000000000000..9ab6e1eb1333 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 000000000000..6457291301db --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,147 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. 
+// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "net/url" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonpointer" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + if child.GetUrl() == nil { + return nil, errors.New("childUrl is nil!") + } + + if r.GetUrl() == nil { + return nil, errors.New("parentUrl is nil!") + } + + // Get a copy of the parent url to make sure we do not modify the original. + // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. + // The fragment of the child must be used, so the fragment of the parent is manually removed. 
+ parentUrl := *r.GetUrl() + parentUrl.Fragment = "" + + ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 000000000000..68e993ce3e0c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1,3 @@ +*.sw[nop] +*.iml +.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 000000000000..3289001cd18e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - "1.11" + - "1.12" + - "1.13" +before_install: + - go get github.com/xeipuuv/gojsonreference + - go get github.com/xeipuuv/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000000..55ede8a42cc9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 000000000000..758f26df0f18 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,466 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
+
+References:
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies:
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+
+package main
+
+import (
+    "fmt"
+    "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+    schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+    documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, desc := range result.Errors() {
+            fmt.Printf("- %s\n", desc)
+        }
+    }
+}
+
+
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader:
+
+* Web / HTTP, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the prefix (file://) and a full path to the file are required.
+
+* JSON strings:
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types:
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+    Users []User `json:"users"`
+}
+
+type User struct {
+    Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy:
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and reuse it for multiple validations:
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result:
+
+```go
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, err := range result.Errors() {
+            // Err implements the ResultError interface
+            fmt.Printf("- %s\n", err)
+        }
+    }
+```
+
+
+## Loading local schemas
+
+By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
+
+```go
+	sl := gojsonschema.NewSchemaLoader()
+	loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
+	err := sl.AddSchema("http://some_host.com/string.json", loader1)
+```
+
+Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function:
+```go
+	loader2 := gojsonschema.NewStringLoader(`{
+		"$id" : "http://some_host.com/maxlength.json",
+		"maxLength" : 5
+	}`)
+	err = sl.AddSchemas(loader2)
+```
+
+The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
+```go
+	loader3 := gojsonschema.NewStringLoader(`{
+		"$id" : "http://some_host.com/main.json",
+		"allOf" : [
+			{ "$ref" : "http://some_host.com/string.json" },
+			{ "$ref" : "http://some_host.com/maxlength.json" }
+		]
+	}`)
+
+	schema, err := sl.Compile(loader3)
+
+	documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
+
+	result, err := schema.Validate(documentLoader)
+```
+
+It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
+
+```go
+err = sl.AddSchemas(loader3)
+schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
+```
+
+Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
+
+## Using a specific draft
+By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
+
+Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Draft = gojsonschema.Draft7
+sl.AutoDetect = false
+```
+
+If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice versa, as long as `$schema` is specified in all schemas.
+
+## Meta-schema validation
+Schemas added using `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
+
+The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Validate = true
+err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
+     "$id" : "http://some_host.com/invalid.json",
+     "$schema": "http://json-schema.org/draft-07/schema#",
+     "multipleOf" : true
+}`))
+```
+
+Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
+
+Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
+
+
+## Working with Errors
+
+The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it:
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
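+For illustration only (this sketch is not from the original README), a custom locale might embed the library's exported `DefaultLocale` and override individual messages; that `DefaultLocale` is exported and embeddable this way is an assumption to verify against your version:
+
+```go
+// Hypothetical sketch: inherit all default messages and override one.
+type MyLocale struct {
+	gojsonschema.DefaultLocale // assumed exported default locale implementation
+}
+
+// Required overrides the message template used for "required" errors.
+func (l MyLocale) Required() string {
+	return `{{.property}} is mandatory`
+}
+
+func init() {
+	gojsonschema.Locale = MyLocale{}
+}
+```
+
+**err.Type()**: *string* Returns the "type" of error that occurred.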
Note that you can also type check; see the sketch at the end of this section.
+
+Note: An error of type RequiredError has an err.Type() return value of "required":
+
+               "required": RequiredError
+               "invalid_type": InvalidTypeError
+               "number_any_of": NumberAnyOfError
+               "number_one_of": NumberOneOfError
+               "number_all_of": NumberAllOfError
+               "number_not": NumberNotError
+               "missing_dependency": MissingDependencyError
+               "internal": InternalError
+               "const": ConstError
+               "enum": EnumError
+               "array_no_additional_items": ArrayNoAdditionalItemsError
+               "array_min_items": ArrayMinItemsError
+               "array_max_items": ArrayMaxItemsError
+               "unique": ItemsMustBeUniqueError
+               "contains": ArrayContainsError
+               "array_min_properties": ArrayMinPropertiesError
+               "array_max_properties": ArrayMaxPropertiesError
+               "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+               "invalid_property_pattern": InvalidPropertyPatternError
+               "invalid_property_name": InvalidPropertyNameError
+               "string_gte": StringLengthGTEError
+               "string_lte": StringLengthLTEError
+               "pattern": DoesNotMatchPatternError
+               "multiple_of": MultipleOfError
+               "number_gte": NumberGTEError
+               "number_gt": NumberGTError
+               "number_lte": NumberLTEError
+               "number_lt": NumberLTError
+               "condition_then": ConditionThenError
+               "condition_else": ConditionElseError
+
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the field name in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overriding the locale with a custom implementation.
+
+**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors to the result afterwards.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
+
+Note that in most cases err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format, e.g.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+The library allows you to specify custom template functions, should you require more complex error message handling.
+```go
+gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
+	"allcaps": func(s string) string {
+		return strings.ToUpper(s)
+	},
+}
+```
+
+Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
+```
+{{allcaps .field}} must be greater than or equal to {{.min}}
+```
+
+The above error message would then be rendered with the `field` value in capital letters. For example:
+```
+"PASSWORD must be greater than or equal to 8"
+```
+
+Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
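+As noted above, errors can also be handled by their concrete type instead of the string code. A minimal sketch (assuming the concrete error types are pointers implementing `ResultError`, as in errors.go):
+
+```go
+for _, e := range result.Errors() {
+	switch e.(type) {
+	case *gojsonschema.RequiredError:
+		// a required property is missing; its name is in the details
+		fmt.Printf("missing property: %v\n", e.Details()["property"])
+	case *gojsonschema.InvalidTypeError:
+		fmt.Printf("wrong type at %s\n", e.Field())
+	default:
+		fmt.Printf("- %s\n", e.Description())
+	}
+}
+```
+
+## Formats
+JSON Schema allows for an optional "format" property to validate instances against well-known formats.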
gojsonschema ships with most of the formats defined in the spec, which you can use like this:
+
+```json
+{"type": "string", "format": "email"}
+```
+
+Not all formats defined in draft-07 are available. Implemented formats are:
+
+* `date`
+* `time`
+* `date-time`
+* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
+* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
+* `idn-email`. Same caveat as `email`.
+* `ipv4`
+* `ipv6`
+* `uri`. Includes unicode support.
+* `uri-reference`. Includes unicode support.
+* `iri`
+* `iri-reference`
+* `uri-template`
+* `uuid`
+* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
+* `json-pointer`
+* `relative-json-pointer`
+
+`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
+unicode-enabled formats for the sake of interoperability, as other implementations might not support unicode in the regular formats.
+
+The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code.
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct{}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input interface{}) bool {
+
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return strings.HasPrefix(asString, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now to use it in your JSON schema:
+```json
+{"type": "string", "format": "role"}
+```
+
+Another example would be to check if the provided integer matches an id in a database:
+
+JSON schema:
+```json
+{"type": "integer", "format": "ValidUserId"}
+```
+
+```go
+// Define the format checker
+type ValidUserIdFormatChecker struct{}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
+
+    asFloat64, ok := input.(float64) // Numbers are always float64 here
+    if !ok {
+        return false
+    }
+
+    // XXX: look up int(asFloat64) in the database here
+
+    return true
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
+```
+
+Formats can also be removed, for example if you want to override one of the formats that is defined by default.
+
+```go
+gojsonschema.FormatCheckers.Remove("hostname")
+```
+
+
+## Additional custom validation
+After the validation has run and you have the results, you may add additional
+errors using `Result.AddError`. This is useful to maintain the same format within the result set instead
+of having to add special exceptions for your own errors. Below is an example.
+
+```go
+type AnswerInvalidError struct {
+	gojsonschema.ResultErrorFields
+}
+
+func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
+	err := AnswerInvalidError{}
+	err.SetContext(context)
+	err.SetType("custom_invalid_error")
+	// It is important to use SetDescriptionFormat(): the format is parsed and then
+	// used to set the description, so any description set directly on err would be overridden.
+	err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
+	err.SetValue(value)
+	err.SetDetails(details)
+
+	return &err
+}
+
+func validate(schemaLoader, documentLoader gojsonschema.JSONLoader) (*gojsonschema.Result, error) {
+	schema, err := gojsonschema.NewSchema(schemaLoader)
+	if err != nil {
+		return nil, err
+	}
+
+	result, err := schema.Validate(documentLoader)
+	if err != nil {
+		return nil, err
+	}
+
+	if true { // some domain-specific validation
+		jsonContext := gojsonschema.NewJsonContext("question", nil)
+		errDetail := gojsonschema.ErrorDetails{
+			"answer": 42,
+		}
+		result.AddError(
+			newAnswerInvalidError(
+				gojsonschema.NewJsonContext("answer", jsonContext),
+				52,
+				errDetail,
+			),
+			errDetail,
+		)
+	}
+
+	return result, nil
+}
+```
+
+This is especially useful if you want to add validation beyond what the
+JSON Schema drafts can provide, such as business-specific logic.
+
+## Uses
+
+gojsonschema uses the following test suite:
+
+https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go new file mode 100644 index 000000000000..61298e7aa0ff --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go @@ -0,0 +1,125 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package gojsonschema + +import ( + "errors" + "math" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +// Draft is a JSON-schema draft version +type Draft int + +// Supported Draft versions +const ( + Draft4 Draft = 4 + Draft6 Draft = 6 + Draft7 Draft = 7 + Hybrid Draft = math.MaxInt32 +) + +type draftConfig struct { + Version Draft + MetaSchemaURL string + MetaSchema string +} +type draftConfigs []draftConfig + +var drafts draftConfigs + +func init() { + drafts = []draftConfig{ + { + Version: Draft4, + MetaSchemaURL: "http://json-schema.org/draft-04/schema", + MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, + }, + { + Version: Draft6, + MetaSchemaURL: "http://json-schema.org/draft-06/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, + }, + { + Version: Draft7, + MetaSchemaURL: "http://json-schema.org/draft-07/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, + }, + } +} + +func (dc draftConfigs) GetMetaSchema(url string) string { + for _, config := range dc { + if config.MetaSchemaURL == url { + return config.MetaSchema + } + } + return "" +} +func (dc draftConfigs) GetDraftVersion(url string) *Draft { + for _, config := range dc { + if config.MetaSchemaURL == url { + return &config.Version + } + } + return nil +} +func (dc draftConfigs) GetSchemaURL(draft Draft) string { + for _, config := range dc { + if config.Version == draft { + return config.MetaSchemaURL + } + } + return "" +} + +func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { + + if isKind(documentNode, reflect.Bool) { + return "", nil, nil + } + + if !isKind(documentNode, reflect.Map) { + return "", nil, errors.New("schema is invalid") + } + + m := documentNode.(map[string]interface{}) + + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], 
reflect.String) {
+			return "", nil, errors.New(formatErrorDescription(
+				Locale.MustBeOfType(),
+				ErrorDetails{
+					"key":  KEY_SCHEMA,
+					"type": TYPE_STRING,
+				},
+			))
+		}
+
+		schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
+
+		if err != nil {
+			return "", nil, err
+		}
+
+		schema := schemaReference.String()
+
+		return schema, drafts.GetDraftVersion(schema), nil
+	}
+
+	return "", nil, nil
+} diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go new file mode 100644 index 000000000000..e4e9814f3184 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -0,0 +1,364 @@ +package gojsonschema
+
+import (
+	"bytes"
+	"sync"
+	"text/template"
+)
+
+var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is done
+// sync.RWMutex is used for efficiently locking when new templates are created
+type errorTemplate struct {
+	*template.Template
+	sync.RWMutex
+}
+
+type (
+
+	// FalseError. ErrorDetails: -
+	FalseError struct {
+		ResultErrorFields
+	}
+
+	// RequiredError indicates that a required field is missing
+	// ErrorDetails: property string
+	RequiredError struct {
+		ResultErrorFields
+	}
+
+	// InvalidTypeError indicates that a field has the incorrect type
+	// ErrorDetails: expected, given
+	InvalidTypeError struct {
+		ResultErrorFields
+	}
+
+	// NumberAnyOfError is produced in case of a failing "anyOf" validation
+	// ErrorDetails: -
+	NumberAnyOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberOneOfError is produced in case of a failing "oneOf" validation
+	// ErrorDetails: -
+	NumberOneOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberAllOfError is produced in case of a failing "allOf" validation
+	// ErrorDetails: -
+	NumberAllOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberNotError is produced if a "not" validation failed
+	// ErrorDetails: -
+	NumberNotError struct {
+		ResultErrorFields
+	}
+
+	// MissingDependencyError is produced in case of a "missing dependency" problem
+	// ErrorDetails: dependency
+	MissingDependencyError struct {
+		ResultErrorFields
+	}
+
+	// InternalError indicates an internal error
+	// ErrorDetails: error
+	InternalError struct {
+		ResultErrorFields
+	}
+
+	// ConstError indicates a const error
+	// ErrorDetails: allowed
+	ConstError struct {
+		ResultErrorFields
+	}
+
+	// EnumError indicates an enum error
+	// ErrorDetails: allowed
+	EnumError struct {
+		ResultErrorFields
+	}
+
+	// ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed
+	// ErrorDetails: -
+	ArrayNoAdditionalItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinItemsError is produced if an array contains fewer items than the allowed minimum
+	// ErrorDetails: min
+	ArrayMinItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxItemsError is produced if an array contains more items than the allowed maximum
+	// ErrorDetails: max
+	ArrayMaxItemsError struct {
+		ResultErrorFields
+	}
+
+	// ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items
+	// ErrorDetails: type, i, j
+	ItemsMustBeUniqueError struct {
+		ResultErrorFields
+	}
+
+	// ArrayContainsError is produced if an array contains invalid items
+	// ErrorDetails:
+	ArrayContainsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinPropertiesError is produced if an object contains fewer properties
than the allowed minimum
+	// ErrorDetails: min
+	ArrayMinPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum
+	// ErrorDetails: max
+	ArrayMaxPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// AdditionalPropertyNotAllowedError is produced if an object has additional properties that are not allowed
+	// ErrorDetails: property
+	AdditionalPropertyNotAllowedError struct {
+		ResultErrorFields
+	}
+
+	// InvalidPropertyPatternError is produced if an invalid pattern was found
+	// ErrorDetails: property, pattern
+	InvalidPropertyPatternError struct {
+		ResultErrorFields
+	}
+
+	// InvalidPropertyNameError is produced if an invalidly-named property was found
+	// ErrorDetails: property
+	InvalidPropertyNameError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthGTEError is produced if a string is shorter than the minimum required length
+	// ErrorDetails: min
+	StringLengthGTEError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthLTEError is produced if a string is longer than the maximum allowed length
+	// ErrorDetails: max
+	StringLengthLTEError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchPatternError is produced if a string does not match the defined pattern
+	// ErrorDetails: pattern
+	DoesNotMatchPatternError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchFormatError is produced if a string does not match the defined format
+	// ErrorDetails: format
+	DoesNotMatchFormatError struct {
+		ResultErrorFields
+	}
+
+	// MultipleOfError is produced if a number is not a multiple of the defined multipleOf
+	// ErrorDetails: multiple
+	MultipleOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTEError is produced if a number is lower than the allowed minimum
+	// ErrorDetails: min
+	NumberGTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTError is produced if a number is lower than, or equal to, the specified minimum, and exclusiveMinimum is set
+	// ErrorDetails: min
+	NumberGTError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTEError is produced if a number is higher than the allowed maximum
+	// ErrorDetails: max
+	NumberLTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTError is produced if a number is higher than, or equal to, the specified maximum, and exclusiveMaximum is set
+	// ErrorDetails: max
+	NumberLTError struct {
+		ResultErrorFields
+	}
+
+	// ConditionThenError is produced if a condition's "then" validation is invalid
+	// ErrorDetails: -
+	ConditionThenError struct {
+		ResultErrorFields
+	}
+
+	// ConditionElseError is produced if a condition's "else" condition is invalid
+	// ErrorDetails: -
+	ConditionElseError struct {
+		ResultErrorFields
+	}
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
+	var t string
+	var d string
+	switch err.(type) {
+	case *FalseError:
+		t = "false"
+		d = locale.False()
+	case *RequiredError:
+		t = "required"
+		d = locale.Required()
+	case *InvalidTypeError:
+		t = "invalid_type"
+		d = locale.InvalidType()
+	case *NumberAnyOfError:
+		t = "number_any_of"
+		d = locale.NumberAnyOf()
+	case *NumberOneOfError:
+		t = "number_one_of"
+		d = locale.NumberOneOf()
+	case *NumberAllOfError:
+		t = "number_all_of"
+		d = locale.NumberAllOf()
+	case *NumberNotError:
+		t = "number_not"
+		d = locale.NumberNot()
+	case *MissingDependencyError:
+		t = "missing_dependency"
+		d = 
locale.MissingDependency() + case *InternalError: + t = "internal" + d = locale.Internal() + case *ConstError: + t = "const" + d = locale.Const() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayContainsError: + t = "contains" + d = locale.ArrayContains() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *InvalidPropertyNameError: + t = "invalid_property_name" + d = locale.InvalidPropertyName() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + case *ConditionThenError: + t = "condition_then" + d = locale.ConditionThen() + case *ConditionElseError: + t = "condition_else" + d = locale.ConditionElse() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + err.SetDescriptionFormat(d) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
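+// For example (illustrative): with details {"field": "age", "min": 0}, the
+// format string "{{.field}} must be greater than or equal to {{.min}}"
+// renders as "age must be greater than or equal to 0".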
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+	var tpl *template.Template
+	var descrAsBuffer bytes.Buffer
+	var err error
+
+	errorTemplates.RLock()
+	tpl = errorTemplates.Lookup(s)
+	errorTemplates.RUnlock()
+
+	if tpl == nil {
+		errorTemplates.Lock()
+		tpl = errorTemplates.New(s)
+
+		if ErrorTemplateFuncs != nil {
+			tpl.Funcs(ErrorTemplateFuncs)
+		}
+
+		tpl, err = tpl.Parse(s)
+		errorTemplates.Unlock()
+
+		if err != nil {
+			return err.Error()
+		}
+	}
+
+	err = tpl.Execute(&descrAsBuffer, details)
+	if err != nil {
+		return err.Error()
+	}
+
+	return descrAsBuffer.String()
+} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go new file mode 100644 index 000000000000..873ffc7d79b8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -0,0 +1,368 @@ +package gojsonschema
+
+import (
+	"net"
+	"net/mail"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		// IsFormat checks if input has the correct format and type
+		IsFormat(input interface{}) bool
+	}
+
+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}
+
+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}
+
+	// IPV4FormatChecker verifies IP addresses in the IPv4 format
+	IPV4FormatChecker struct{}
+
+	// IPV6FormatChecker verifies IP addresses in the IPv6 format
+	IPV6FormatChecker struct{}
+
+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	// 		Partial Time: HH:MM:SS
+	//		Full Date: YYYY-MM-DD
+	// 		Full Time: HH:MM:SSZ-07:00
+	//		Date Time: YYYY-MM-DDTHH:MM:SSZ-07:00
+	//
+	// 	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	//
+	//	Note: Nanoseconds are also supported in all formats
+	//
+	//	http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}
+
+	// DateFormatChecker verifies date formats
+	//
+	// Valid format:
+	//		Full Date: YYYY-MM-DD
+	//
+	// 	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	DateFormatChecker struct{}
+
+	// TimeFormatChecker verifies time formats
+	//
+	// Valid formats:
+	// 		Partial Time: HH:MM:SS
+	// 		Full Time: HH:MM:SSZ-07:00
+	//
+	// 	Where
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	TimeFormatChecker struct{}
+
+	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
+	URIFormatChecker struct{}
+
+	// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+	URIReferenceFormatChecker struct{}
+
+	// URITemplateFormatChecker validates a URI template per RFC6570
+	URITemplateFormatChecker struct{}
+
+	// HostnameFormatChecker validates a hostname is in the correct format
+	HostnameFormatChecker struct{}
+
+	// UUIDFormatChecker validates a UUID is in the correct format
+	UUIDFormatChecker struct{}
+
+	// RegexFormatChecker validates a regex is in the correct format
+
RegexFormatChecker struct{}
+
+	// JSONPointerFormatChecker validates a JSON Pointer per RFC6901
+	JSONPointerFormatChecker struct{}
+
+	// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
+	RelativeJSONPointerFormatChecker struct{}
+)
+
+var (
+	// FormatCheckers holds the valid formatters, and is a public variable
+	// so library users can add custom formatters
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date":                  DateFormatChecker{},
+			"time":                  TimeFormatChecker{},
+			"date-time":             DateTimeFormatChecker{},
+			"hostname":              HostnameFormatChecker{},
+			"email":                 EmailFormatChecker{},
+			"idn-email":             EmailFormatChecker{},
+			"ipv4":                  IPV4FormatChecker{},
+			"ipv6":                  IPV6FormatChecker{},
+			"uri":                   URIFormatChecker{},
+			"uri-reference":         URIReferenceFormatChecker{},
+			"iri":                   URIFormatChecker{},
+			"iri-reference":         URIReferenceFormatChecker{},
+			"uri-template":          URITemplateFormatChecker{},
+			"uuid":                  UUIDFormatChecker{},
+			"regex":                 RegexFormatChecker{},
+			"json-pointer":          JSONPointerFormatChecker{},
+			"relative-json-pointer": RelativeJSONPointerFormatChecker{},
+		},
+	}
+
+	// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+	rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+	// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
+	rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
+
+	rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+
+	rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
+
+	rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
+
+	lock = new(sync.RWMutex)
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+	lock.Lock()
+	c.formatters[name] = f
+	lock.Unlock()
+
+	return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+	lock.Lock()
+	delete(c.formatters, name)
+	lock.Unlock()
+
+	return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+	lock.RLock()
+	_, ok := c.formatters[name]
+	lock.RUnlock()
+
+	return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+	lock.RLock()
+	f, ok := c.formatters[name]
+	lock.RUnlock()
+
+	// If a format is unrecognized it should always pass validation
+	if !ok {
+		return true
+	}
+
+	return f.IsFormat(input)
+}
+
+// IsFormat checks if input is a correctly formatted e-mail address
+func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	_, err := mail.ParseAddress(asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted IPv4-address
+func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	// Credit: https://github.com/asaskevich/govalidator
+	ip := net.ParseIP(asString)
+	return ip != nil && 
strings.Contains(asString, ".")
+}
+
+// IsFormat checks if input is a correctly formatted IPv6-address
+func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	// Credit: https://github.com/asaskevich/govalidator
+	ip := net.ParseIP(asString)
+	return ip != nil && strings.Contains(asString, ":")
+}
+
+// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
+func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	formats := []string{
+		"15:04:05",
+		"15:04:05Z07:00",
+		"2006-01-02",
+		time.RFC3339,
+		time.RFC3339Nano,
+	}
+
+	for _, format := range formats {
+		if _, err := time.Parse(format, asString); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
+func (f DateFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+	_, err := time.Parse("2006-01-02", asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
+func (f TimeFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
+		return true
+	}
+
+	_, err := time.Parse("15:04:05", asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986
+func (f URIFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	u, err := url.Parse(asString)
+
+	if err != nil || u.Scheme == "" {
+		return false
+	}
+
+	return !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
+func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	_, err := url.Parse(asString)
+	return err == nil && !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI template per RFC6570
+func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	u, err := url.Parse(asString)
+	if err != nil || strings.Contains(asString, `\`) {
+		return false
+	}
+
+	return rxURITemplate.MatchString(u.Path)
+}
+
+// IsFormat checks if input is a correctly formatted hostname
+func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	return rxHostname.MatchString(asString) && len(asString) < 256
+}
+
+// IsFormat checks if input is a correctly formatted UUID
+func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	return rxUUID.MatchString(asString)
+}
+
+// IsFormat checks if input is a correctly formatted regular expression
+func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	if asString == "" {
+		return true
+	}
+	_, err := regexp.Compile(asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
+func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
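+	// Note: per RFC 6901 the empty string is a valid JSON Pointer (it refers
+	// to the whole document), and the pattern below deliberately accepts it.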
return rxJSONPointer.MatchString(asString) +} + +// IsFormat checks if input is a correctly formatted relative JSON Pointer +func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxRelJSONPointer.MatchString(asString) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 000000000000..ab6fb867c5c5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 000000000000..4ef7a8d03e71 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 000000000000..0e979707b4a3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,73 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// JsonContext implements a persistent linked-list of strings +type JsonContext struct { + head string + tail *JsonContext +} + +// NewJsonContext creates a new JsonContext +func NewJsonContext(head string, tail *JsonContext) *JsonContext { + return &JsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *JsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *JsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 000000000000..5d88af263e5b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,386 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSONLoader defines the JSON loader interface +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +// JSONLoaderFactory defines the JSON loader factory interface +type JSONLoaderFactory interface { + // New creates a new JSON loader for the given source + New(source string) JSONLoader +} + +// DefaultJSONLoaderFactory is the default JSON loader factory +type DefaultJSONLoaderFactory struct { +} + +// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +// New creates a new JSON loader for the given source +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// New creates a new JSON loader for the given source +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +// Opens a file with the given name +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
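+// For example (illustrative note): passing http.Dir("/schemas") resolves a
+// reference like file:///string.json via that filesystem rather than the OS
+// root, which is useful for tests or embedded schema sets.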
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.TrimPrefix(refToURL.String(), "file://") + filename, err = url.QueryUnescape(filename) + + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + filename = strings.TrimPrefix(filename, "/") + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToURL.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + // returned cached versions for metaschemas for drafts 4, 6 and 7 + // for performance and allow for easier offline use + if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { + return decodeJSONUsingNumber(strings.NewReader(metaSchema)) + } + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewStringLoader creates a new JSONLoader, taking a string as source +func NewStringLoader(source string) JSONLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source +func NewBytesLoader(source []byte) JSONLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... + +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewGoLoader creates a new JSONLoader from a given Go struct +func NewGoLoader(source interface{}) JSONLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +// NewReaderLoader creates a new JSON loader using the provided io.Reader +func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +// NewWriterLoader creates a new JSON loader using the provided io.Writer +func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// JSON raw loader +// In case the JSON is already marshalled to interface{} use this loader +// This is used for testing as otherwise there is no guarantee the JSON is marshalled +// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber +type jsonRawLoader struct { + source interface{} +} + +// NewRawLoader creates a new JSON raw loader for the given source +func NewRawLoader(source interface{}) JSONLoader { + return &jsonRawLoader{source: source} +} +func (l *jsonRawLoader) JsonSource() interface{} { + return l.source +} +func (l *jsonRawLoader) LoadJSON() (interface{}, error) { + return l.source, nil +} +func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} +func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 000000000000..a416225cdb38 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,472 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. +// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + + // False returns a format-string for "false" schema validation errors + False() string + + // Required returns a format-string for "required" schema validation errors + Required() string + + // InvalidType returns a format-string for "invalid type" schema validation errors + InvalidType() string + + // NumberAnyOf returns a format-string for "anyOf" schema validation errors + NumberAnyOf() string + + // NumberOneOf returns a format-string for "oneOf" schema validation errors + NumberOneOf() string + + // NumberAllOf returns a format-string for "allOf" schema validation errors + NumberAllOf() string + + // NumberNot returns a format-string to format a NumberNotError + NumberNot() string + + // MissingDependency returns a format-string for "missing dependency" schema validation errors + MissingDependency() string + + // Internal returns a format-string for internal errors + Internal() string + + // Const returns a format-string to format a ConstError + Const() string + + // Enum returns a format-string to format an EnumError + Enum() string + + // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema + ArrayNotEnoughItems() string + + // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError + ArrayNoAdditionalItems() string + + // ArrayMinItems returns a format-string to format an ArrayMinItemsError + ArrayMinItems() string + + // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError + ArrayMaxItems() string + + // Unique returns a format-string to format an ItemsMustBeUniqueError + Unique() string + + // ArrayContains returns a format-string to format an ArrayContainsError + ArrayContains() string + + // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError + ArrayMinProperties() string + + // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError + ArrayMaxProperties() string + + // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError + AdditionalPropertyNotAllowed() string + + // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError + InvalidPropertyPattern() string + + // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError + InvalidPropertyName() string + + // StringGTE returns a format-string to format an StringLengthGTEError + StringGTE() string + + // StringLTE returns a format-string to format an StringLengthLTEError + StringLTE() string + + // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError + 
DoesNotMatchPattern() string + + // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError + DoesNotMatchFormat() string + + // MultipleOf returns a format-string to format an MultipleOfError + MultipleOf() string + + // NumberGTE returns a format-string to format an NumberGTEError + NumberGTE() string + + // NumberGT returns a format-string to format an NumberGTError + NumberGT() string + + // NumberLTE returns a format-string to format an NumberLTEError + NumberLTE() string + + // NumberLT returns a format-string to format an NumberLTError + NumberLT() string + + // Schema validations + + // RegexPattern returns a format-string to format a regex-pattern error + RegexPattern() string + + // GreaterThanZero returns a format-string to format an error where a number must be greater than zero + GreaterThanZero() string + + // MustBeOfA returns a format-string to format an error where a value is of the wrong type + MustBeOfA() string + + // MustBeOfAn returns a format-string to format an error where a value is of the wrong type + MustBeOfAn() string + + // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error + CannotBeUsedWithout() string + + // CannotBeGT returns a format-string to format an error where a value are greater than allowed + CannotBeGT() string + + // MustBeOfType returns a format-string to format an error where a value does not match the required type + MustBeOfType() string + + // MustBeValidRegex returns a format-string to format an error where a regex is invalid + MustBeValidRegex() string + + // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format + MustBeValidFormat() string + + // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 + MustBeGTEZero() string + + // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed + KeyCannotBeGreaterThan() string + + // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type + KeyItemsMustBeOfType() string + + // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique + KeyItemsMustBeUnique() string + + // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error + ReferenceMustBeCanonical() string + + // NotAValidType returns a format-string to format an invalid type error + NotAValidType() string + + // Duplicated returns a format-string to format an error where types are duplicated + Duplicated() string + + // HttpBadStatus returns a format-string for errors when loading a schema using HTTP + HttpBadStatus() string + + // ParseError returns a format-string for JSON parsing errors + ParseError() string + + // ConditionThen returns a format-string for ConditionThenError errors + ConditionThen() string + + // ConditionElse returns a format-string for ConditionElseError errors + ConditionElse() string + + // ErrorFormat returns a format string for errors + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +// False returns a format-string for "false" schema validation errors +func (l DefaultLocale) False() string { + return "False always fails validation" +} + +// Required returns a format-string for "required" schema validation errors +func (l DefaultLocale) Required() string { + return `{{.property}} is required` +} + +// InvalidType 
returns a format-string for "invalid type" schema validation errors +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +// NumberAnyOf returns a format-string for "anyOf" schema validation errors +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +// NumberOneOf returns a format-string for "oneOf" schema validation errors +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +// NumberAllOf returns a format-string for "allOf" schema validation errors +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +// NumberNot returns a format-string to format a NumberNotError +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +// MissingDependency returns a format-string for "missing dependency" schema validation errors +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +// Internal returns a format-string for internal errors +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +// Const returns a format-string to format a ConstError +func (l DefaultLocale) Const() string { + return `{{.field}} does not match: {{.allowed}}` +} + +// Enum returns a format-string to format an EnumError +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +// ArrayMinItems returns a format-string to format an ArrayMinItemsError +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +// Unique returns a format-string to format an ItemsMustBeUniqueError +func (l DefaultLocale) Unique() string { + return `{{.type}} items[{{.i}},{{.j}}] must be unique` +} + +// ArrayContains returns a format-string to format an ArrayContainsError +func (l DefaultLocale) ArrayContains() string { + return `At least one of the items must match` +} + +// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is not allowed` +} + +// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not 
match pattern {{.pattern}}` +} + +// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError +func (l DefaultLocale) InvalidPropertyName() string { + return `Property name of "{{.property}}" does not match` +} + +// StringGTE returns a format-string to format an StringLengthGTEError +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to {{.min}}` +} + +// StringLTE returns a format-string to format an StringLengthLTEError +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to {{.max}}` +} + +// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +// MultipleOf returns a format-string to format an MultipleOfError +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +// NumberGTE returns the format string to format a NumberGTEError +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +// NumberGT returns the format string to format a NumberGTError +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +// NumberLTE returns the format string to format a NumberLTEError +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +// NumberLT returns the format string to format a NumberLTError +func (l DefaultLocale) NumberLT() string { + return `Must be less than {{.max}}` +} + +// Schema validators + +// RegexPattern returns a format-string to format a regex-pattern error +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +// GreaterThanZero returns a format-string to format an error where a number must be greater than zero +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +// MustBeOfA returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +// MustBeOfAn returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +// CannotBeGT returns a format-string to format an error where a value are greater than allowed +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +// MustBeOfType returns a format-string to format an error where a value does not match the required type +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +// MustBeValidRegex returns a format-string to format an error where a regex is invalid +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format +func (l DefaultLocale) MustBeValidFormat() string { + return 
`{{.key}} must be a valid format {{.given}}` +} + +// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be greater than or equal to 0` +} + +// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +// NotAValidType returns a format-string to format an invalid type error +func (l DefaultLocale) NotAValidType() string { + return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` +} + +// Duplicated returns a format-string to format an error where types are duplicated +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +// HttpBadStatus returns a format-string for errors when loading a schema using HTTP +func (l DefaultLocale) HttpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// ErrorFormat returns a format string for errors +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +// ParseError returns a format-string for JSON parsing errors +func (l DefaultLocale) ParseError() string { + return `Expected: {{.expected}}, given: Invalid JSON` +} + +// ConditionThen returns a format-string for ConditionThenError errors +// If/Else +func (l DefaultLocale) ConditionThen() string { + return `Must validate "then" as "if" was valid` +} + +// ConditionElse returns a format-string for ConditionElseError errors +func (l DefaultLocale) ConditionElse() string { + return `Must validate "else" as "if" was not valid` +} + +// constants +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "valid schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 000000000000..0a0179148ba0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,220 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + // Field returns the field name without the root context + // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName + Field() string + // SetType sets the error-type + SetType(string) + // Type returns the error-type + Type() string + // SetContext sets the JSON-context for the error + SetContext(*JsonContext) + // Context returns the JSON-context of the error + Context() *JsonContext + // SetDescription sets a description for the error + SetDescription(string) + // Description returns the description of the error + Description() string + // SetDescriptionFormat sets the format for the description in the default text/template format + SetDescriptionFormat(string) + // DescriptionFormat returns the format for the description in the default text/template format + DescriptionFormat() string + // SetValue sets the value related to the error + SetValue(interface{}) + // Value returns the value related to the error + Value() interface{} + // SetDetails sets the details specific to the error + SetDetails(ErrorDetails) + // Details returns details about the error + Details() ErrorDetails + // String returns a string representation of the error + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + descriptionFormat string // A format for human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + // Result holds the result of a validation + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field returns the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+	return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+// SetType sets the error-type
+func (v *ResultErrorFields) SetType(errorType string) {
+	v.errorType = errorType
+}
+
+// Type returns the error-type
+func (v *ResultErrorFields) Type() string {
+	return v.errorType
+}
+
+// SetContext sets the JSON-context for the error
+func (v *ResultErrorFields) SetContext(context *JsonContext) {
+	v.context = context
+}
+
+// Context returns the JSON-context of the error
+func (v *ResultErrorFields) Context() *JsonContext {
+	return v.context
+}
+
+// SetDescription sets a description for the error
+func (v *ResultErrorFields) SetDescription(description string) {
+	v.description = description
+}
+
+// Description returns the description of the error
+func (v *ResultErrorFields) Description() string {
+	return v.description
+}
+
+// SetDescriptionFormat sets the format for the description in the default text/template format
+func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
+	v.descriptionFormat = descriptionFormat
+}
+
+// DescriptionFormat returns the format for the description in the default text/template format
+func (v *ResultErrorFields) DescriptionFormat() string {
+	return v.descriptionFormat
+}
+
+// SetValue sets the value related to the error
+func (v *ResultErrorFields) SetValue(value interface{}) {
+	v.value = value
+}
+
+// Value returns the value related to the error
+func (v *ResultErrorFields) Value() interface{} {
+	return v.value
+}
+
+// SetDetails sets the details specific to the error
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+	v.details = details
+}
+
+// Details returns details about the error
+func (v *ResultErrorFields) Details() ErrorDetails {
+	return v.details
+}
+
+// String returns a string representation of the error
+func (v ResultErrorFields) String() string {
+	// as a fallback, the value is displayed go style
+	valueString := fmt.Sprintf("%v", v.value)
+
+	// marshal the go value to json
+	if v.value == nil {
+		valueString = TYPE_NULL
+	} else {
+		if vs, err := marshalToJSONString(v.value); err == nil {
+			if vs == nil {
+				valueString = TYPE_NULL
+			} else {
+				valueString = *vs
+			}
+		}
+	}
+
+	return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+		"context":     v.context.String(),
+		"description": v.description,
+		"value":       valueString,
+		"field":       v.Field(),
+	})
+}
+
+// Valid indicates if no errors were found
+func (v *Result) Valid() bool {
+	return len(v.errors) == 0
+}
+
+// Errors returns the errors that were found
+func (v *Result) Errors() []ResultError {
+	return v.errors
+}
+
+// AddError appends a fully filled error to the error set
+// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
+func (v *Result) AddError(err ResultError, details ErrorDetails) {
+	if _, exists := details["context"]; !exists && err.Context() != nil {
+		details["context"] = err.Context().String()
+	}
+
+	err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
+
+	v.errors = append(v.errors, err)
+}
+
+func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
+	newError(err, context, value, Locale, details)
+	v.errors = append(v.errors, err)
+	v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
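+
+// Editor's sketch (not upstream code): typical consumption of a Result,
+// assuming the package-level Validate helper defined elsewhere in this
+// library (it is not part of this file):
+//
+//	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+//	if err != nil {
+//		// the schema or document could not be loaded
+//	}
+//	if !result.Valid() {
+//		for _, e := range result.Errors() {
+//			fmt.Printf("%s: %s\n", e.Field(), e.Description())
+//		}
+//	}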
+ +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 000000000000..9e93cd79558e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,1087 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "math/big" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. + ErrorTemplateFuncs template.FuncMap +) + +// NewSchema instances a schema using the given JSONLoader +func NewSchema(l JSONLoader) (*Schema, error) { + return NewSchemaLoader().Compile(l) +} + +// Schema holds a schema +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}, draft Draft) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} + return d.parseSchema(document, d.rootSchema) +} + +// SetRootSchemaName sets the root-schema name +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if currentSchema.draft == nil { + if currentSchema.parent == nil { + return errors.New("Draft not set") + } + currentSchema.draft = currentSchema.parent.draft + } + + // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" + if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { + b := documentNode.(bool) + currentSchema.pass = &b + return nil + } + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + // In draft 6 the id keyword was renamed to $id + // Hybrid mode uses the old id by default + var keyID string + + switch *currentSchema.draft { + case Draft4: + keyID = KEY_ID + case Hybrid: + keyID = KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + default: + keyID = KEY_ID_NEW + } + if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": keyID, + }, + )) + } + if k, ok := m[keyID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { + for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map, reflect.Bool) { + + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} + + err := d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + 
Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + currentSchema.ref = &jsonReference + + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + if err := currentSchema.types.Add(typeInArray.(string)); err != nil { + return err + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // propertyNames + if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { + if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertyNames = newSchema + err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + 
)) + } + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if 
*minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if !ok { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, + )) + } + currentSchema.format = formatString + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + if isStringInSlice(currentSchema.required, requiredValue.(string)) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + currentSchema.required = append(currentSchema.required, requiredValue.(string)) + } else 
{ + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { + newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.contains = newSchema + err := d.parseSchema(m[KEY_CONTAINS], newSchema) + if err != nil { + return err + } + } + + // validation : all + + if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { + is, err := marshalWithoutNumber(m[KEY_CONST]) + if err != nil { + return err + } + currentSchema._const = is + } + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + is, err := marshalWithoutNumber(v) + if err != nil { + return err + } + if isStringInSlice(currentSchema.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + currentSchema.enum = append(currentSchema.enum, *is) + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.oneOf = append(currentSchema.oneOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.anyOf = append(currentSchema.anyOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return 
errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.allOf = append(currentSchema.allOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.not = newSchema + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + if *currentSchema.draft >= Draft7 { + if existsMapKey(m, KEY_IF) { + if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} + currentSchema._if = newSchema + err := d.parseSchema(m[KEY_IF], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_THEN) { + if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} + currentSchema._then = newSchema + err := d.parseSchema(m[KEY_THEN], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_ELSE) { + if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} + currentSchema._else = newSchema + err := d.parseSchema(m[KEY_ELSE], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, + )) + } + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { + var ( + refdDocumentNode interface{} + dsp *schemaPoolDocument + err error + ) + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + + d.referencePool.Add(currentSchema.ref.String(), newSchema) + + dsp, err = d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + newSchema.id = currentSchema.ref + + refdDocumentNode = dsp.Document + newSchema.draft = dsp.Draft + + if err != nil { + return err + } + + if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + err = d.parseSchema(refdDocumentNode, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + valuesToRegister = append(valuesToRegister, value.(string)) + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map, reflect.Bool: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go new file mode 100644 index 000000000000..20db0c1f99a5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go @@ -0,0 +1,206 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gojsonschema + +import ( + "bytes" + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +// SchemaLoader is used to load schemas +type SchemaLoader struct { + pool *schemaPool + AutoDetect bool + Validate bool + Draft Draft +} + +// NewSchemaLoader creates a new SchemaLoader +func NewSchemaLoader() *SchemaLoader { + + ps := &SchemaLoader{ + pool: &schemaPool{ + schemaPoolDocuments: make(map[string]*schemaPoolDocument), + }, + AutoDetect: true, + Validate: false, + Draft: Hybrid, + } + ps.pool.autoDetect = &ps.AutoDetect + + return ps +} + +func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { + + var ( + schema string + err error + ) + if sl.AutoDetect { + schema, _, err = parseSchemaURL(documentNode) + if err != nil { + return err + } + } + + // If no explicit "$schema" is used, use the default metaschema associated with the draft used + if schema == "" { + if sl.Draft == Hybrid { + return nil + } + schema = drafts.GetSchemaURL(sl.Draft) + } + + // Disable validation when loading the metaschema to prevent an infinite recursive loop + sl.Validate = false + + metaSchema, err := sl.Compile(NewReferenceLoader(schema)) + + if err != nil { + return err + } + + sl.Validate = true + + result := metaSchema.validateDocument(documentNode) + + if !result.Valid() { + var res bytes.Buffer + for _, err := range result.Errors() { + res.WriteString(err.String()) + res.WriteString("\n") + } + return errors.New(res.String()) + } + + return nil +} + +// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require +// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema +func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { + emptyRef, _ := gojsonreference.NewJsonReference("") + + for _, loader := range loaders { + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + // Directly use the Recursive function, so that it only gets added to the schema pool by $id + // and not by the ref of the document, which is empty + if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { + return err + } + } + + return nil +} + +// AddSchema adds a schema under the provided URL to the schema cache +func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { + + ref, err := gojsonreference.NewJsonReference(url) + + if err != nil { + return err + } + + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + return sl.pool.parseReferences(doc, ref, true) +} + +// Compile loads and compiles a schema +func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { + + ref, err := rootSchema.JsonReference() + + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = sl.pool + d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = rootSchema.LoadJSON() + if err != nil { + return nil, err + } + // References need only be parsed if loading JSON directly + // as pool.GetDocument already does this for us if
loading by reference + err = sl.pool.parseReferences(doc, ref, true) + if err != nil { + return nil, err + } + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return nil, err + } + } + + draft := sl.Draft + if sl.AutoDetect { + _, detectedDraft, err := parseSchemaURL(doc) + if err != nil { + return nil, err + } + if detectedDraft != nil { + draft = *detectedDraft + } + } + + err = d.parse(doc, draft) + if err != nil { + return nil, err + } + + return &d, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 000000000000..35b1cc630661 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,215 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} + Draft *Draft +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + jsonLoaderFactory JSONLoaderFactory + autoDetect *bool +} + +func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { + + var ( + draft *Draft + err error + reference = ref.String() + ) + // Only the root document should be added to the schema pool if pooled is true + if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { + return fmt.Errorf("Reference already exists: \"%s\"", reference) + } + + if *p.autoDetect { + _, draft, err = parseSchemaURL(document) + if err != nil { + return err + } + } + + err = p.parseReferencesRecursive(document, ref, draft) + + if pooled { + p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} + } + + return err +} + +func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { + // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. + // For $ref references it takes into account the $id scope it is in and replaces + // the reference by the absolute resolved reference + + // When encountering errors it fails silently. Error handling is done when the schema + // is syntactically parsed and any error encountered here should also come up there. 
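+	// Editorial sketch, not upstream code: given the document
+	//	{"$id": "http://example.com/root.json",
+	//	 "properties": {"a": {"$ref": "item.json"}}}
+	// the nested "$ref" is rewritten in place to the absolute reference
+	// "http://example.com/item.json", resolved against the enclosing $id scope.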
+ switch m := document.(type) { + case []interface{}: + for _, v := range m { + p.parseReferencesRecursive(v, ref, draft) + } + case map[string]interface{}: + localRef := &ref + + keyID := KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) + if err == nil { + localRef, err = ref.Inherits(jsonReference) + if err == nil { + if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { + return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) + } + p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} + } + } + } + + if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) + if err == nil { + absoluteRef, err := localRef.Inherits(jsonReference) + if err == nil { + m[KEY_REF] = absoluteRef.String() + } + } + } + + for k, v := range m { + // const and enums should be interpreted literally, so ignore them + if k == KEY_CONST || k == KEY_ENUM { + continue + } + // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc + // Therefore don't treat it like a schema. + if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { + if child, ok := v.(map[string]interface{}); ok { + for _, v := range child { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } else { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } + return nil +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + draft *Draft + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // Create a deep copy, so we can remove the fragment part later on without altering the original + refToURL, _ := gojsonreference.NewJsonReference(reference.String()) + + // First check if the given fragment is a location independent identifier + // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 + + if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + // If the given reference is not a location independent identifier, + // strip the fragment and look for a document with it's base URI + + refToURL.GetUrl().Fragment = "" + + if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { + document, _, err := reference.GetPointer().Get(cachedSpd.Document) + + if err != nil { + return nil, err + } + + if internalLogEnabled { + internalLog(" From pool") + } + + spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} + p.schemaPoolDocuments[reference.String()] = spd + + return spd, nil + } + + // It is not possible to load anything remotely that is not canonical... 
+ if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference.String()}, + )) + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + + if err != nil { + return nil, err + } + + // add the whole document to the pool for potential re-use + p.parseReferences(document, refToURL, true) + + _, draft, _ = parseSchemaURL(document) + + // resolve the potential fragment and also cache it + document, _, err = reference.GetPointer().Get(document) + + if err != nil { + return nil, err + } + + return &schemaPoolDocument{Document: document, Draft: draft}, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 000000000000..6e5e1b5cdb36 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. +// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 000000000000..36b447a2915b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] + if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 000000000000..ec779812c319 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,149 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. 
+// +// created 27-02-2013 + +package gojsonschema + +import ( + "github.com/xeipuuv/gojsonreference" + "math/big" + "regexp" +) + +// Constants +const ( + KEY_SCHEMA = "$schema" + KEY_ID = "id" + KEY_ID_NEW = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_PROPERTY_NAMES = "propertyNames" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_CONTAINS = "contains" + KEY_CONST = "const" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" + KEY_IF = "if" + KEY_THEN = "then" + KEY_ELSE = "else" +) + +type subSchema struct { + draft *Draft + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Quick pass/fail for boolean schemas + pass *bool + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + + // hierarchy + parent *subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *big.Rat + maximum *big.Rat + exclusiveMaximum *big.Rat + minimum *big.Rat + exclusiveMinimum *big.Rat + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + propertyNames *subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + contains *subSchema + + additionalItems interface{} + + // validation : all + _const *string //const is a golang keyword + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema + _if *subSchema // if/else are golang keywords + _then *subSchema + _else *subSchema +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 000000000000..0e6fd517350d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,62 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +// Type constants +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +// JSON_TYPES hosts the list of type that are supported in JSON +var JSON_TYPES []string + +// SCHEMA_TYPES hosts the list of type that are supported in schemas +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 000000000000..a17d22e3bd0b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,197 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" +) + +func isKind(what interface{}, kinds ...reflect.Kind) bool { + target := what + if isJSONNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + targetKind := reflect.ValueOf(target).Kind() + for _, kind := range kinds { + if targetKind == kind { + return true + } + } + return false +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
+func indexStringInSlice(s []string, what string) int { + for i := range s { + if s[i] == what { + return i + } + } + return -1 +} + +func marshalToJSONString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func marshalWithoutNumber(value interface{}) (*string, error) { + + // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber + // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 + // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber + // so that these differences in representation are removed + + jsonString, err := marshalToJSONString(value) + if err != nil { + return nil, err + } + + var document interface{} + + err = json.Unmarshal([]byte(*jsonString), &document) + if err != nil { + return nil, err + } + + return marshalToJSONString(document) +} + +func isJSONNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJSONInteger(what interface{}) (isInt bool) { + + jsonNumber := what.(json.Number) + + bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) + + return isValidNumber && bigFloat.IsInt() + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func mustBeInteger(what interface{}) *int { + + if isJSONNumber(what) { + + number := what.(json.Number) + + isInt := checkJSONInteger(number) + + if isInt { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *big.Rat { + + if isJSONNumber(what) { + number := what.(json.Number) + float64Value, success := new(big.Rat).SetString(string(number)) + if success { + return float64Value + } + } + + return nil + +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 000000000000..74091bca19ac --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,858 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// Validate loads and validates a JSON schema +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + // load schema + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + return schema.Validate(ld) +} + +// Validate loads and validates a JSON document +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + return v.validateDocument(root), nil +} + +func (v *Schema) validateDocument(root interface{}) *Result { + result := &Result{} + context := NewJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + return result +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle true/false schema as early as possible as all other fields will be nil + if currentSubSchema.pass != nil { + if !*currentSubSchema.pass { + result.addInternalError( + new(FalseError), + context, + currentNode, + ErrorDetails{}, + ) + } + return + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJSONNumber(currentNode) { + + value := currentNode.(json.Number) + + isInt := checkJSONInteger(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isInt { + givenType = TYPE_NUMBER + } + + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := 
rValue.Kind() + + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := NewJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... 
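+
+// Editorial sketch, not upstream code, of the combinator semantics checked in
+// validateSchema below: against {"oneOf": [{"type": "string"}, {"minLength": 2}]}
+// the value "a" matches exactly one subschema and passes, while "ab" matches
+// both and fails. With "anyOf" in place of "oneOf" both values would pass, and
+// when nothing matches, the errors of the best-scoring (closest) subschema are
+// merged into the result.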
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addInternalError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + } + } + } + } + } + + if currentSubSchema._if != nil { + validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) + if currentSubSchema._then != nil && validationResultIf.Valid() { + validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) + if !validationResultThen.Valid() { + 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultThen) + } + } + if currentSubSchema._else != nil && !validationResultIf.Valid() { + validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) + if !validationResultElse.Valid() { + result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultElse) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // const: + if currentSubSchema._const != nil { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if *vString != *currentSubSchema._const { + result.addInternalError(new(ConstError), + context, + value, + ErrorDetails{ + "allowed": *currentSubSchema._const, + }, + ) + } + } + + // enum: + if len(currentSubSchema.enum) > 0 { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !isStringInSlice(currentSubSchema.enum, *vString) { + result.addInternalError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. 
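+			// Editorial sketch, not upstream code: with "items": [s0, s1] and
+			// the instance [a, b, c], the extra element c is handled by the
+			// switch below: "additionalItems": false rejects the extra
+			// elements, an "additionalItems" schema validates each of them,
+			// and an absent keyword leaves them unconstrained.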
+ + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addInternalError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addInternalError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems = make(map[string]int) + for j, v := range value { + vString, err := marshalWithoutNumber(v) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if i, ok := stringifiedItems[*vString]; ok { + result.addInternalError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, + ) + } + stringifiedItems[*vString] = j + } + } + + // contains: + + if currentSubSchema.contains != nil { + validatedOne := false + var bestValidationResult *Result + + for i, v := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + + validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) + if validationResult.Valid() { + validatedOne = true + break + } else { + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult + } + } + } + if !validatedOne { + result.addInternalError( + new(ArrayContainsError), + context, + value, + ErrorDetails{}, + ) + if bestValidationResult != nil { + result.mergeErrors(bestValidationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addInternalError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addInternalError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addInternalError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + for pk := range value { + + // Check whether this property is described by "properties" + found := false + for _, spValue := range 
currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + // Check whether this property is described by "patternProperties" + ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + // If it is not described by neither "properties" nor "patternProperties" it must pass "additionalProperties" + if !found && !ppMatch { + switch ap := currentSubSchema.additionalProperties.(type) { + case bool: + // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema + if !ap { + result.addInternalError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + + } + case *subSchema: + validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) + result.mergeErrors(validationResult) + } + } + } + + // propertyNames: + if currentSubSchema.propertyNames != nil { + for pk := range value { + validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) + if !validationResult.Valid() { + result.addInternalError(new(InvalidPropertyNameError), + context, + value, ErrorDetails{ + "property": pk, + }) + result.mergeErrors(validationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + validated := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + validated = true + subContext := NewJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + } + } + + if !validated { + return false + } + + result.incrementScore() + return true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore JSON numbers + if isJSONNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addInternalError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addInternalError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addInternalError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) 
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore non numbers + if !isJSONNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := new(big.Rat).SetString(string(number)) + + // multipleOf: + if currentSubSchema.multipleOf != nil { + if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { + result.addInternalError( + new(MultipleOfError), + context, + number, + ErrorDetails{ + "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), + }, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if float64Value.Cmp(currentSubSchema.maximum) == 1 { + result.addInternalError( + new(NumberLTEError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.maximum), + }, + ) + } + } + if currentSubSchema.exclusiveMaximum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { + result.addInternalError( + new(NumberLTError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), + }, + ) + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if float64Value.Cmp(currentSubSchema.minimum) == -1 { + result.addInternalError( + new(NumberGTEError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.minimum), + }, + ) + } + } + if currentSubSchema.exclusiveMinimum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { + result.addInternalError( + new(NumberGTError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), + }, + ) + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 000000000000..cea48efdcd26 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,424 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. +// +// Clearsigned messages are cryptographically signed, but the contents of the +// message are kept in plaintext so that it can be read without special tools. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. +package clearsign + +import ( + "bufio" + "bytes" + "crypto" + "fmt" + "hash" + "io" + "net/textproto" + "strconv" + "strings" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// A Block represents a clearsigned message. 
A signature on a Block can +// be checked by passing Bytes into openpgp.CheckDetachedSignature. +type Block struct { + Headers textproto.MIMEHeader // Optional unverified Hash headers + Plaintext []byte // The original message text + Bytes []byte // The signed message + ArmoredSignature *armor.Block // The signature block +} + +// start is the marker which denotes the beginning of a clearsigned message. +var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") + +// dashEscape is prefixed to any lines that begin with a hyphen so that they +// can't be confused with endText. +var dashEscape = []byte("- ") + +// endText is a marker which denotes the end of the message and the start of +// an armored signature. +var endText = []byte("-----BEGIN PGP SIGNATURE-----") + +// end is a marker which denotes the end of the armored signature. +var end = []byte("\n-----END PGP SIGNATURE-----") + +var crlf = []byte("\r\n") +var lf = byte('\n') + +// getLine returns the first \r\n or \n delineated line from the given byte +// array. The line does not include the \r\n or \n. The remainder of the byte +// array (also not including the new line bytes) is also returned and this will +// always be smaller than the original argument. +func getLine(data []byte) (line, rest []byte) { + i := bytes.Index(data, []byte{'\n'}) + var j int + if i < 0 { + i = len(data) + j = i + } else { + j = i + 1 + if i > 0 && data[i-1] == '\r' { + i-- + } + } + return data[0:i], data[j:] +} + +// Decode finds the first clearsigned message in data and returns it, as well as +// the suffix of data which remains after the message. Any prefix data is +// discarded. +// +// If no message is found, or if the message is invalid, Decode returns nil and +// the whole data slice. The only allowed header type is Hash, and it is not +// verified against the signature hash. +func Decode(data []byte) (b *Block, rest []byte) { + // start begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. + rest = data + if bytes.HasPrefix(data, start[1:]) { + rest = rest[len(start)-1:] + } else if i := bytes.Index(data, start); i >= 0 { + rest = rest[i+len(start):] + } else { + return nil, data + } + + // Consume the start line and check it does not have a suffix. + suffix, rest := getLine(rest) + if len(suffix) != 0 { + return nil, data + } + + var line []byte + b = &Block{ + Headers: make(textproto.MIMEHeader), + } + + // Next come a series of header lines. + for { + // This loop terminates because getLine's second result is + // always smaller than its argument. + if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. + if line, rest = getLine(rest); len(line) == 0 { + break + } + + // Reject headers with control or Unicode characters. + if i := bytes.IndexFunc(line, func(r rune) bool { + return r < 0x20 || r > 0x7e + }); i != -1 { + return nil, data + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := string(line[0:i]), string(line[i+1:]) + key = strings.TrimSpace(key) + if key != "Hash" { + return nil, data + } + val = strings.TrimSpace(val) + b.Headers.Add(key, val) + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. 
+ rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + hashers []hash.Hash // one per key in privateKeys + hashType crypto.Hash + toHash io.Writer // writes to all the hashes in hashers + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKeys []*packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.toHash.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. + } else { + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. 
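+				// Editorial sketch, not upstream code: for the input "a \tb\n"
+				// the buffered run " \t" is flushed just below because 'b'
+				// follows it, whereas in "a \n" the run is dropped above, so
+				// only "a" (plus a delayed CRLF) reaches the hash; RFC 4880
+				// ignores trailing whitespace in cleartext signatures.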
+ if len(d.whitespace) > 0 { + d.toHash.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.toHash.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + t := d.config.Now() + for i, k := range d.privateKeys { + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = k.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = t + sig.IssuerKeyId = &k.KeyId + + if err = sig.Sign(d.hashers[i], k, d.config); err != nil { + return + } + if err = sig.Serialize(out); err != nil { + return + } + } + + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config) +} + +// EncodeMulti returns a WriteCloser which will clear-sign a message with all the +// private keys indicated and write it to w. If config is nil, sensible defaults +// are used. +func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + for _, k := range privateKeys { + if k.Encrypted { + return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString())) + } + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + var hashers []hash.Hash + var ws []io.Writer + for range privateKeys { + h := hashType.New() + hashers = append(hashers, h) + ws = append(ws, h) + } + toHash := io.MultiWriter(ws...) + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. + if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + hashers: hashers, + hashType: hashType, + toHash: toHash, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKeys: privateKeys, + config: config, + } + + return +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. 
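Encode and EncodeMulti are the writing half of the canonicalization that Decode applies when reading, so a message signed here verifies cleanly after a round trip. A minimal sketch, assuming an openpgp.Entity named signer whose private key is present and decrypted (signer and msg are placeholders, not part of this package; imports are bytes, errors, golang.org/x/crypto/openpgp, and .../openpgp/clearsign):

    // Sign msg as a clearsigned message, then decode and verify it.
    func roundTrip(signer *openpgp.Entity, msg []byte) error {
            var buf bytes.Buffer
            pt, err := clearsign.Encode(&buf, signer.PrivateKey, nil) // nil config: SHA-256 default
            if err != nil {
                    return err
            }
            if _, err := pt.Write(msg); err != nil {
                    pt.Close()
                    return err
            }
            if err := pt.Close(); err != nil { // Close appends the armored signature
                    return err
            }

            b, _ := clearsign.Decode(buf.Bytes())
            if b == nil {
                    return errors.New("no clearsigned message found")
            }
            // b.Bytes is the canonicalized text the signature was computed over.
            _, err = openpgp.CheckDetachedSignature(openpgp.EntityList{signer},
                    bytes.NewReader(b.Bytes), b.ArmoredSignature.Body)
            return err
    }

The helper below maps Go's crypto.Hash values to the OpenPGP names that EncodeMulti writes into the Hash header.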
+func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 000000000000..76fa40fb20af --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. +func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 
+= w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/helm.sh/helm/v3/LICENSE b/vendor/helm.sh/helm/v3/LICENSE new file mode 100644 index 000000000000..21c57fae21f7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Kubernetes Authors All Rights Reserved + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
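Returning to the scrypt package vendored earlier in this diff: its Key documentation translates directly into a small key-derivation helper. A minimal sketch using the 2017-era interactive parameters from the package docs (the salt handling is illustrative, not part of the package; imports are crypto/rand and golang.org/x/crypto/scrypt):

    // deriveKey stretches a password into a 32-byte key suitable for AES-256.
    // A fresh random salt is generated per password and must be stored
    // alongside the derived key so the derivation can be repeated.
    func deriveKey(password []byte) (key, salt []byte, err error) {
            salt = make([]byte, 16)
            if _, err = rand.Read(salt); err != nil {
                    return nil, nil, err
            }
            key, err = scrypt.Key(password, salt, 32768, 8, 1, 32)
            return key, salt, err
    }

Raising N increases CPU and memory cost together; the package doc's rule of thumb is the largest power of two that stays within about 100 milliseconds.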
diff --git a/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go new file mode 100644 index 000000000000..4ea09cca480e --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go @@ -0,0 +1,50 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fileutil + +import ( + "io" + "os" + "path/filepath" + + "helm.sh/helm/v3/internal/third_party/dep/fs" +) + +// AtomicWriteFile atomically (as atomic as os.Rename allows) writes a file to a +// disk. +func AtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error { + tempFile, err := os.CreateTemp(filepath.Split(filename)) + if err != nil { + return err + } + tempName := tempFile.Name() + + if _, err := io.Copy(tempFile, reader); err != nil { + tempFile.Close() // return value is ignored as we are already on error path + return err + } + + if err := tempFile.Close(); err != nil { + return err + } + + if err := os.Chmod(tempName, mode); err != nil { + return err + } + + return fs.RenameWithFallback(tempName, filename) +} diff --git a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go new file mode 100644 index 000000000000..b6f45da9e2ea --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go @@ -0,0 +1,262 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// Resolver resolves dependencies from semantic version ranges to a particular version. +type Resolver struct { + chartpath string + cachepath string + registryClient *registry.Client +} + +// New creates a new resolver for a given chart, helm home and registry client. +func New(chartpath, cachepath string, registryClient *registry.Client) *Resolver { + return &Resolver{ + chartpath: chartpath, + cachepath: cachepath, + registryClient: registryClient, + } +} + +// Resolve resolves dependencies and returns a lock file with the resolution. +func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + + // Now we clone the dependencies, locking as we go. 
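The version matching in the loop that follows is delegated to Masterminds/semver: the dependency's Version field is parsed as a constraint, and candidate chart versions are checked against it. In isolation the check reduces to the following (the constraint and version strings are illustrative):

    c, err := semver.NewConstraint(">= 1.2.0, < 2.0.0") // a Chart.yaml dependency version field
    if err != nil {
            return nil, err // malformed constraint
    }
    v, err := semver.NewVersion("1.4.7") // a candidate from the repo index
    if err != nil {
            return nil, err
    }
    matched := c.Check(v) // true: 1.4.7 satisfies the range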
+ locked := make([]*chart.Dependency, len(reqs)) + missing := []string{} + for i, d := range reqs { + constraint, err := semver.NewConstraint(d.Version) + if err != nil { + return nil, errors.Wrapf(err, "dependency %q has an invalid version/constraint format", d.Name) + } + + if d.Repository == "" { + // Local chart subfolder + if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil { + return nil, err + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: "", + Version: d.Version, + } + continue + } + if strings.HasPrefix(d.Repository, "file://") { + chartpath, err := GetLocalPath(d.Repository, r.chartpath) + if err != nil { + return nil, err + } + + ch, err := loader.LoadDir(chartpath) + if err != nil { + return nil, err + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + // Not a legit entry. + continue + } + + if !constraint.Check(v) { + missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version)) + continue + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: ch.Metadata.Version, + } + continue + } + + repoName := repoNames[d.Name] + // if the repository was not defined, but the dependency defines a repository url, bypass the cache + if repoName == "" && d.Repository != "" { + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: d.Version, + } + continue + } + + var vs repo.ChartVersions + var version string + var ok bool + found := true + if !registry.IsOCI(d.Repository) { + repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName))) + if err != nil { + return nil, errors.Wrapf(err, "no cached repository for %s found. (try 'helm repo update')", repoName) + } + + vs, ok = repoIndex.Entries[d.Name] + if !ok { + return nil, errors.Errorf("%s chart not found in repo %s", d.Name, d.Repository) + } + found = false + } else { + version = d.Version + + // Check to see if an explicit version has been provided + _, err := semver.NewVersion(version) + + // Use an explicit version, otherwise search for tags + if err == nil { + vs = []*repo.ChartVersion{{ + Metadata: &chart.Metadata{ + Version: version, + }, + }} + + } else { + // Retrieve list of tags for repository + ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, fmt.Sprintf("%s://", registry.OCIScheme)), d.Name) + tags, err := r.registryClient.Tags(ref) + if err != nil { + return nil, errors.Wrapf(err, "could not retrieve list of tags for repository %s", d.Repository) + } + + vs = make(repo.ChartVersions, len(tags)) + for ti, t := range tags { + // Mock chart version objects + version := &repo.ChartVersion{ + Metadata: &chart.Metadata{ + Version: t, + }, + } + vs[ti] = version + } + } + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: version, + } + // The versions are already sorted and hence the first one to satisfy the constraint is used + for _, ver := range vs { + v, err := semver.NewVersion(ver.Version) + // OCI does not need URLs + if err != nil || (!registry.IsOCI(d.Repository) && len(ver.URLs) == 0) { + // Not a legit entry. 
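+			// (Entries with malformed versions, or non-OCI entries that
+			// carry no download URLs, are skipped rather than failing the
+			// whole resolution.)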
+ continue + } + if constraint.Check(v) { + found = true + locked[i].Version = v.Original() + break + } + } + + if !found { + missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version)) + } + } + if len(missing) > 0 { + return nil, errors.Errorf("can't get a valid version for %d subchart(s): %s. Make sure a matching chart version exists in the repo, or change the version constraint in Chart.yaml", len(missing), strings.Join(missing, ", ")) + } + + digest, err := HashReq(reqs, locked) + if err != nil { + return nil, err + } + + return &chart.Lock{ + Generated: time.Now(), + Digest: digest, + Dependencies: locked, + }, nil +} + +// HashReq generates a hash of the dependencies. +// +// This should be used only to compare against another hash generated by this +// function. +func HashReq(req, lock []*chart.Dependency) (string, error) { + data, err := json.Marshal([2][]*chart.Dependency{req, lock}) + if err != nil { + return "", err + } + s, err := provenance.Digest(bytes.NewBuffer(data)) + return "sha256:" + s, err +} + +// HashV2Req generates a hash of requirements generated in Helm v2. +// +// This should be used only to compare against another hash generated by the +// Helm v2 hash function. It is to handle issue: +// https://github.com/helm/helm/issues/7233 +func HashV2Req(req []*chart.Dependency) (string, error) { + dep := make(map[string][]*chart.Dependency) + dep["dependencies"] = req + data, err := json.Marshal(dep) + if err != nil { + return "", err + } + s, err := provenance.Digest(bytes.NewBuffer(data)) + return "sha256:" + s, err +} + +// GetLocalPath generates absolute local path when use +// "file://" in repository of dependencies +func GetLocalPath(repo, chartpath string) (string, error) { + var depPath string + var err error + p := strings.TrimPrefix(repo, "file://") + + // root path is absolute + if strings.HasPrefix(p, "/") { + if depPath, err = filepath.Abs(p); err != nil { + return "", err + } + } else { + depPath = filepath.Join(chartpath, p) + } + + if _, err = os.Stat(depPath); os.IsNotExist(err) { + return "", errors.Errorf("directory %s not found", depPath) + } else if err != nil { + return "", err + } + + return depPath, nil +} diff --git a/vendor/helm.sh/helm/v3/internal/sympath/walk.go b/vendor/helm.sh/helm/v3/internal/sympath/walk.go new file mode 100644 index 000000000000..6b221fb6cc02 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/sympath/walk.go @@ -0,0 +1,120 @@ +/* +Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are +provided under the BSD license. + +https://github.com/golang/go/blob/master/LICENSE + +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sympath + +import ( + "log" + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" +) + +// Walk walks the file tree rooted at root, calling walkFn for each file or directory +// in the tree, including root. 
All errors that arise visiting files and directories +// are filtered by walkFn. The files are walked in lexical order, which makes the +// output deterministic but means that for very large directories Walk can be +// inefficient. Walk follows symbolic links. +func Walk(root string, walkFn filepath.WalkFunc) error { + info, err := os.Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = symwalk(root, info, walkFn) + } + if err == filepath.SkipDir { + return nil + } + return err +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func readDirNames(dirname string) ([]string, error) { + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// symwalk recursively descends path, calling walkFn. +func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // Recursively walk symlinked directories. + if IsSymlink(info) { + resolved, err := filepath.EvalSymlinks(path) + if err != nil { + return errors.Wrapf(err, "error evaluating symlink %s", path) + } + //This log message is to highlight a symlink that is being used within a chart, symlinks can be used for nefarious reasons. + log.Printf("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved) + if info, err = os.Lstat(resolved); err != nil { + return err + } + if err := symwalk(path, info, walkFn); err != nil && err != filepath.SkipDir { + return err + } + return nil + } + + if err := walkFn(path, info, nil); err != nil { + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := os.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = symwalk(filename, fileInfo, walkFn) + if err != nil { + if (!fileInfo.IsDir() && !IsSymlink(fileInfo)) || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// IsSymlink is used to determine if the fileinfo is a symbolic link. +func IsSymlink(fi os.FileInfo) bool { + return fi.Mode()&os.ModeSymlink != 0 +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go new file mode 100644 index 000000000000..d29bb5f87192 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go @@ -0,0 +1,372 @@ +/* +Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "io" + "os" + "path/filepath" + "runtime" + "syscall" + + "github.com/pkg/errors" +) + +// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep. +// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go +// No changes to the code were made other than removing some unused functions + +// RenameWithFallback attempts to rename a file or directory, but falls back to +// copying in the event of a cross-device link error. If the fallback copy +// succeeds, src is still removed, emulating normal rename behavior. +func RenameWithFallback(src, dst string) error { + _, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "cannot stat %s", src) + } + + err = os.Rename(src, dst) + if err == nil { + return nil + } + + return renameFallback(err, src, dst) +} + +// renameByCopy attempts to rename a file or directory by copying it to the +// destination and then removing the src thus emulating the rename behavior. +func renameByCopy(src, dst string) error { + var cerr error + if dir, _ := IsDir(src); dir { + cerr = CopyDir(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying directory failed") + } + } else { + cerr = copyFile(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying file failed") + } + } + + if cerr != nil { + return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst) + } + + return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) +} + +var ( + errSrcNotDir = errors.New("source is not a directory") + errDstExist = errors.New("destination already exists") +) + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// Source directory must exist, destination directory must *not* exist. +func CopyDir(src, dst string) error { + src = filepath.Clean(src) + dst = filepath.Clean(dst) + + // We use os.Lstat() here to ensure we don't fall in a loop where a symlink + // actually links to a one of its parent directories. 
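+	// (Lstat does not follow the link, so a symlinked source fails the
+	// IsDir check below instead of being traversed.)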
+ fi, err := os.Lstat(src) + if err != nil { + return err + } + if !fi.IsDir() { + return errSrcNotDir + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + return errDstExist + } + + if err = os.MkdirAll(dst, fi.Mode()); err != nil { + return errors.Wrapf(err, "cannot mkdir %s", dst) + } + + entries, err := os.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "cannot read directory %s", dst) + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err = CopyDir(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying directory failed") + } + } else { + // This will include symlinks, which is what we want when + // copying things. + if err = copyFile(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying file failed") + } + } + } + + return nil +} + +// copyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all its contents will be replaced by the contents +// of the source file. The file mode will be copied from the source. +func copyFile(src, dst string) (err error) { + if sym, err := IsSymlink(src); err != nil { + return errors.Wrap(err, "symlink check failed") + } else if sym { + if err := cloneSymlink(src, dst); err != nil { + if runtime.GOOS == "windows" { + // If cloning the symlink fails on Windows because the user + // does not have the required privileges, ignore the error and + // fall back to copying the file contents. + // + // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx + if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { + return err + } + } else { + return err + } + } else { + return nil + } + } + + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return + } + + if _, err = io.Copy(out, in); err != nil { + out.Close() + return + } + + // Check for write errors on Close + if err = out.Close(); err != nil { + return + } + + si, err := os.Stat(src) + if err != nil { + return + } + + // Temporary fix for Go < 1.9 + // + // See: https://github.com/golang/dep/issues/774 + // and https://github.com/golang/go/issues/20829 + if runtime.GOOS == "windows" { + dst = fixLongPath(dst) + } + err = os.Chmod(dst, si.Mode()) + + return +} + +// cloneSymlink will create a new symlink that points to the resolved path of sl. +// If sl is a relative symlink, dst will also be a relative symlink. +func cloneSymlink(sl, dst string) error { + resolved, err := os.Readlink(sl) + if err != nil { + return err + } + + return os.Symlink(resolved, dst) +} + +// IsDir determines is the path given is a directory or not. +func IsDir(name string) (bool, error) { + fi, err := os.Stat(name) + if err != nil { + return false, err + } + if !fi.IsDir() { + return false, errors.Errorf("%q is not a directory", name) + } + return true, nil +} + +// IsSymlink determines if the given path is a symbolic link. 
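RenameWithFallback is the entry point Helm actually uses (AtomicWriteFile, earlier in this diff, finishes with it); CopyDir and copyFile above are its cross-device fallback. A usage sketch with hypothetical paths, where a rename from a tmpfs staging directory to a bind-mounted destination fails with EXDEV and degrades to copy-then-delete:

    // On the same filesystem this is a plain os.Rename; across filesystems
    // it copies recursively and then removes the source.
    if err := fs.RenameWithFallback("/tmp/chart-staging", "/data/charts/mychart"); err != nil {
            log.Fatal(err)
    }

IsSymlink, next, is the check copyFile relies on to clone a link rather than copy the file contents it points at.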
+func IsSymlink(path string) (bool, error) { + l, err := os.Lstat(path) + if err != nil { + return false, err + } + + return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil +} + +// fixLongPath returns the extended-length (\\?\-prefixed) form of +// path when needed, in order to avoid the default 260 character file +// path limit imposed by Windows. If path is not easily converted to +// the extended-length form (for example, if path is a relative path +// or contains .. elements), or is short enough, fixLongPath returns +// path unmodified. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath +func fixLongPath(path string) string { + // Do nothing (and don't allocate) if the path is "short". + // Empirically (at least on the Windows Server 2013 builder), + // the kernel is arbitrarily okay with < 248 bytes. That + // matches what the docs above say: + // "When using an API to create a directory, the specified + // path cannot be so long that you cannot append an 8.3 file + // name (that is, the directory name cannot exceed MAX_PATH + // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. + // + // The MSDN docs appear to say that a normal path that is 248 bytes long + // will work; empirically the path must be less than 248 bytes long. + if len(path) < 248 { + // Don't fix. (This is how Go 1.7 and earlier worked, + // not automatically generating the \\?\ form) + return path + } + + // The extended form begins with \\?\, as in + // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. + // The extended form disables evaluation of . and .. path + // elements and disables the interpretation of / as equivalent + // to \. The conversion here rewrites / to \ and elides + // . elements as well as trailing or duplicate separators. For + // simplicity it avoids the conversion entirely for relative + // paths or paths containing .. elements. For now, + // \\server\share paths are not converted to + // \\?\UNC\server\share paths because the rules for doing so + // are less well-specified. + if len(path) >= 2 && path[:2] == `\\` { + // Don't canonicalize UNC paths. + return path + } + if !isAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case os.IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !os.IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} + +func isAbs(path string) (b bool) { + v := volumeName(path) + if v == "" { + return false + } + path = path[len(v):] + if path == "" { + return false + } + return os.IsPathSeparator(path[0]) +} + +func volumeName(path string) (v string) { + if len(path) < 2 { + return "" + } + // with drive letter + c := path[0] + if path[1] == ':' && + ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || + 'A' <= c && c <= 'Z') { + return path[:2] + } + // is it UNC + if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && + !os.IsPathSeparator(path[2]) && path[2] != '.' 
{ + // first, leading `\\` and next shouldn't be `\`. its server name. + for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if os.IsPathSeparator(path[n]) { + n++ + // third, following something characters. its share name. + if !os.IsPathSeparator(path[n]) { + if path[n] == '.' { + break + } + for ; n < l; n++ { + if os.IsPathSeparator(path[n]) { + break + } + } + return path[:n] + } + break + } + } + } + return "" +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go new file mode 100644 index 000000000000..a3e5e56a61a6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go @@ -0,0 +1,58 @@ +//go:build !windows + +/* +Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } else if terr.Err != syscall.EXDEV { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + + return renameByCopy(src, dst) +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go new file mode 100644 index 000000000000..a377720a6f58 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go @@ -0,0 +1,69 @@ +//go:build windows + +/* +Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under +the BSD license. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } + + if terr.Err != syscall.EXDEV { + // In windows it can drop down to an operating system call that + // returns an operating system error with a different number and + // message. Checking for that as a fall back. + noerr, ok := terr.Err.(syscall.Errno) + + // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. + // See https://msdn.microsoft.com/en-us/library/cc231199.aspx + if ok && noerr != 0x11 { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + } + + return renameByCopy(src, dst) +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go new file mode 100644 index 000000000000..ae62d0e6f89b --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go @@ -0,0 +1,178 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + "sort" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +// deploymentutil contains a copy of a few functions from Kubernetes controller code to avoid a dependency on k8s.io/kubernetes. +// This code is copied from https://github.com/kubernetes/kubernetes/blob/e856613dd5bb00bcfaca6974431151b5c06cbed5/pkg/controller/deployment/util/deployment_util.go +// No changes to the code were made other than removing some unused functions + +// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions. +type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error) + +// ListReplicaSets returns a slice of RSes the given deployment targets. +// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), +// because only the controller itself should do that. +// However, it does filter out anything whose ControllerRef doesn't match. +func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) { + // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector + // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. + namespace := deployment.Namespace + selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := metav1.ListOptions{LabelSelector: selector.String()} + all, err := getRSList(namespace, options) + if err != nil { + return nil, err + } + // Only include those whose ControllerRef matches the Deployment. + owned := make([]*apps.ReplicaSet, 0, len(all)) + for _, rs := range all { + if metav1.IsControlledBy(rs, deployment) { + owned = append(owned, rs) + } + } + return owned, nil +} + +// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. +type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet + +func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } +func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). +func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet { + sort.Sort(ReplicaSetsByCreationTimestamp(rsList)) + for i := range rsList { + if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { + // In rare cases, such as after cluster upgrades, Deployment may end up with + // having more than one new ReplicaSets that have the same template as its template, + // see https://github.com/kubernetes/kubernetes/issues/40415 + // We deterministically choose the oldest new ReplicaSet. + return rsList[i] + } + } + // new ReplicaSet does not exist. 
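+	// (Callers typically combine this with RsListFromClient, defined below;
+	// with client as a hypothetical kubernetes.Interface:
+	//
+	//	rsList, _ := ListReplicaSets(deployment, RsListFromClient(client.AppsV1()))
+	//	newRS := FindNewReplicaSet(deployment, rsList)
+	//
+	// which is exactly the pairing GetNewReplicaSet below wraps.)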
+ return nil +} + +// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. +// Returns nil if the new replica set doesn't exist yet. +func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) { + rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) + if err != nil { + return nil, err + } + return FindNewReplicaSet(deployment, rsList), nil +} + +// RsListFromClient returns an rsListFunc that wraps the given client. +func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { + rsList, err := c.ReplicaSets(namespace).List(context.Background(), options) + if err != nil { + return nil, err + } + var ret []*apps.ReplicaSet + for i := range rsList.Items { + ret = append(ret, &rsList.Items[i]) + } + return ret, err + } +} + +// IsRollingUpdate returns true if the strategy type is a rolling update. +func IsRollingUpdate(deployment *apps.Deployment) bool { + return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType +} + +// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. +func MaxUnavailable(deployment apps.Deployment) int32 { + if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { + return int32(0) + } + // Error caught by validation + _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + if maxUnavailable > *deployment.Spec.Replicas { + return *deployment.Spec.Replicas + } + return maxUnavailable +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. 
For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go new file mode 100644 index 000000000000..8b9d4329fe9b --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "os" + + "github.com/pkg/errors" +) + +// Options represents configurable options used to create client and server TLS configurations. +type Options struct { + CaCertFile string + // If either the KeyFile or CertFile is empty, ClientConfig() will not load them. + KeyFile string + CertFile string + // Client-only options + InsecureSkipVerify bool +} + +// ClientConfig returns a TLS configuration for use by a Helm client. 
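+//
+// A minimal usage sketch (the file paths are illustrative only):
+//
+//	cfg, err := tlsutil.ClientConfig(tlsutil.Options{
+//		CaCertFile: "ca.crt",
+//		CertFile:   "tls.crt",
+//		KeyFile:    "tls.key",
+//	})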
+func ClientConfig(opts Options) (cfg *tls.Config, err error) {
+	var cert *tls.Certificate
+	var pool *x509.CertPool
+
+	if opts.CertFile != "" || opts.KeyFile != "" {
+		if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil {
+			if os.IsNotExist(err) {
+				return nil, errors.Wrapf(err, "could not load x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile)
+			}
+			return nil, errors.Wrapf(err, "could not read x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile)
+		}
+	}
+	if !opts.InsecureSkipVerify && opts.CaCertFile != "" {
+		if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil {
+			return nil, err
+		}
+	}
+
+	cfg = &tls.Config{InsecureSkipVerify: opts.InsecureSkipVerify, RootCAs: pool}
+	// Only attach a client certificate when one was actually loaded;
+	// dereferencing cert unconditionally would panic when neither CertFile
+	// nor KeyFile is set.
+	if cert != nil {
+		cfg.Certificates = []tls.Certificate{*cert}
+	}
+	return cfg, nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
new file mode 100644
index 000000000000..7cd1dace9682
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tlsutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// NewClientTLS returns tls.Config appropriate for client auth.
+func NewClientTLS(certFile, keyFile, caFile string, insecureSkipTLSverify bool) (*tls.Config, error) {
+	config := tls.Config{
+		InsecureSkipVerify: insecureSkipTLSverify,
+	}
+
+	if certFile != "" && keyFile != "" {
+		cert, err := CertFromFilePair(certFile, keyFile)
+		if err != nil {
+			return nil, err
+		}
+		config.Certificates = []tls.Certificate{*cert}
+	}
+
+	if caFile != "" {
+		cp, err := CertPoolFromFile(caFile)
+		if err != nil {
+			return nil, err
+		}
+		config.RootCAs = cp
+	}
+
+	return &config, nil
+}
+
+// CertPoolFromFile returns an x509.CertPool containing the certificates
+// in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not
+// be parsed, or if the file does not contain any certificates
+func CertPoolFromFile(filename string) (*x509.CertPool, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, errors.Errorf("can't read CA file: %v", filename)
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, errors.Errorf("failed to append certificates from file: %s", filename)
+	}
+	return cp, nil
+}
+
+// CertFromFilePair returns a tls.Certificate containing the
+// certificates public/private key pair from a pair of given PEM-encoded files.
+// Returns an error if the file could not be read, a certificate could not +// be parsed, or if the file does not contain any certificates +func CertFromFilePair(certFile, keyFile string) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, errors.Wrapf(err, "can't load key pair from cert %s and key %s", certFile, keyFile) + } + return &cert, err +} diff --git a/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go new file mode 100644 index 000000000000..a8cf7398c07f --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go @@ -0,0 +1,73 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package urlutil + +import ( + "net/url" + "path" + "path/filepath" +) + +// URLJoin joins a base URL to one or more path components. +// +// It's like filepath.Join for URLs. If the baseURL is pathish, this will still +// perform a join. +// +// If the URL is unparsable, this returns an error. +func URLJoin(baseURL string, paths ...string) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + // We want path instead of filepath because path always uses /. + all := []string{u.Path} + all = append(all, paths...) + u.Path = path.Join(all...) + return u.String(), nil +} + +// Equal normalizes two URLs and then compares for equality. +func Equal(a, b string) bool { + au, err := url.Parse(a) + if err != nil { + a = filepath.Clean(a) + b = filepath.Clean(b) + // If urls are paths, return true only if they are an exact match + return a == b + } + bu, err := url.Parse(b) + if err != nil { + return false + } + + for _, u := range []*url.URL{au, bu} { + if u.Path == "" { + u.Path = "/" + } + u.Path = filepath.Clean(u.Path) + } + return au.String() == bu.String() +} + +// ExtractHostname returns hostname from URL +func ExtractHostname(addr string) (string, error) { + u, err := url.Parse(addr) + if err != nil { + return "", err + } + return u.Hostname(), nil +} diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go new file mode 100644 index 000000000000..6f6f319b01a5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/version/version.go @@ -0,0 +1,81 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version // import "helm.sh/helm/v3/internal/version" + +import ( + "flag" + "runtime" + "strings" +) + +var ( + // version is the current version of Helm. 
+ // Update this whenever making a new release. + // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata] + // + // Increment major number for new feature additions and behavioral changes. + // Increment minor number for bug fixes and performance enhancements. + version = "v3.18" + + // metadata is extra build time data + metadata = "" + // gitCommit is the git sha1 + gitCommit = "" + // gitTreeState is the state of the git tree + gitTreeState = "" +) + +// BuildInfo describes the compile time information. +type BuildInfo struct { + // Version is the current semver. + Version string `json:"version,omitempty"` + // GitCommit is the git sha1. + GitCommit string `json:"git_commit,omitempty"` + // GitTreeState is the state of the git tree. + GitTreeState string `json:"git_tree_state,omitempty"` + // GoVersion is the version of the Go compiler used. + GoVersion string `json:"go_version,omitempty"` +} + +// GetVersion returns the semver string of the version +func GetVersion() string { + if metadata == "" { + return version + } + return version + "+" + metadata +} + +// GetUserAgent returns a user agent for user with an HTTP client +func GetUserAgent() string { + return "Helm/" + strings.TrimPrefix(GetVersion(), "v") +} + +// Get returns build info +func Get() BuildInfo { + v := BuildInfo{ + Version: GetVersion(), + GitCommit: gitCommit, + GitTreeState: gitTreeState, + GoVersion: runtime.Version(), + } + + // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output + if flag.Lookup("test.v") != nil { + v.GoVersion = "" + } + return v +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/action.go b/vendor/helm.sh/helm/v3/pkg/action/action.go new file mode 100644 index 000000000000..9aaf64ca466d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/action.go @@ -0,0 +1,437 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/engine" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" + "helm.sh/helm/v3/pkg/time" +) + +// Timestamper is a function capable of producing a timestamp.Timestamper. +// +// By default, this is a time.Time function from the Helm time package. This can +// be overridden for testing though, so that timestamps are predictable. +var Timestamper = time.Now + +var ( + // errMissingChart indicates that a chart was not provided. 
+	errMissingChart = errors.New("no chart provided")
+	// errMissingRelease indicates that a release (name) was not provided.
+	errMissingRelease = errors.New("no release provided")
+	// errInvalidRevision indicates that an invalid release revision number was provided.
+	errInvalidRevision = errors.New("invalid release revision")
+	// errPending indicates that another instance of Helm is already applying an operation on a release.
+	errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
+)
+
+// ValidName is a regular expression for resource names.
+//
+// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See
+// pkg/lint/rules.validateMetadataNameFunc for the replacement.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+// Configuration injects the dependencies that all actions share.
+type Configuration struct {
+	// RESTClientGetter is an interface that loads Kubernetes clients.
+	RESTClientGetter RESTClientGetter
+
+	// Releases stores records of releases.
+	Releases *storage.Storage
+
+	// KubeClient is a Kubernetes API client.
+	KubeClient kube.Interface
+
+	// RegistryClient is a client for working with registries
+	RegistryClient *registry.Client
+
+	// Capabilities describes the capabilities of the Kubernetes cluster.
+	Capabilities *chartutil.Capabilities
+
+	Log func(string, ...interface{})
+
+	// HookOutputFunc is called with the namespace, pod, and container name, and returns a writer that will receive the log output.
+	HookOutputFunc func(namespace, pod, container string) io.Writer
+}
+
+// renderResources renders the templates in a chart
+//
+// TODO: This function is badly in need of a refactor.
+// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed
+//
+// This code has to do with writing files to disk.
+func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) {
+	hs := []*release.Hook{}
+	b := bytes.NewBuffer(nil)
+
+	caps, err := cfg.getCapabilities()
+	if err != nil {
+		return hs, b, "", err
+	}
+
+	if ch.Metadata.KubeVersion != "" {
+		if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
+			return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
+		}
+	}
+
+	var files map[string]string
+	var err2 error
+
+	// A `helm template` should not talk to the remote cluster. However, commands with the flag
+	// `--dry-run` with the value of `false`, `none`, or `server` should try to interact with the cluster.
+	// It may break in interesting and exotic ways because other data (e.g. discovery) is mocked.
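+	// For orientation: RunWithContext in install.go computes interactWithRemote
+	// as true for a real run and for `--dry-run=server|none|false`; only a pure
+	// client-side dry run renders without contacting the cluster.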
+ if interactWithRemote && cfg.RESTClientGetter != nil { + restConfig, err := cfg.RESTClientGetter.ToRESTConfig() + if err != nil { + return hs, b, "", err + } + e := engine.New(restConfig) + e.EnableDNS = enableDNS + files, err2 = e.Render(ch, values) + } else { + var e engine.Engine + e.EnableDNS = enableDNS + files, err2 = e.Render(ch, values) + } + + if err2 != nil { + return hs, b, "", err2 + } + + // NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource, + // pull it out of here into a separate file so that we can actually use the output of the rendered + // text file. We have to spin through this map because the file contains path information, so we + // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip + // it in the sortHooks. + var notesBuffer bytes.Buffer + for k, v := range files { + if strings.HasSuffix(k, notesFileSuffix) { + if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) { + // If buffer contains data, add newline before adding more + if notesBuffer.Len() > 0 { + notesBuffer.WriteString("\n") + } + notesBuffer.WriteString(v) + } + delete(files, k) + } + } + notes := notesBuffer.String() + + // Sort hooks, manifests, and partials. Only hooks and manifests are returned, + // as partials are not used after renderer.Render. Empty manifests are also + // removed here. + hs, manifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder) + if err != nil { + // By catching parse errors here, we can prevent bogus releases from going + // to Kubernetes. + // + // We return the files as a big blob of data to help the user debug parser + // errors. + for name, content := range files { + if strings.TrimSpace(content) == "" { + continue + } + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content) + } + return hs, b, "", err + } + + // Aggregate all valid manifests into one big doc. + fileWritten := make(map[string]bool) + + if includeCrds { + for _, crd := range ch.CRDObjects() { + if outputDir == "" { + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Filename, string(crd.File.Data[:])) + } else { + err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Filename]) + if err != nil { + return hs, b, "", err + } + fileWritten[crd.Filename] = true + } + } + } + + for _, m := range manifests { + if outputDir == "" { + if hideSecret && m.Head.Kind == "Secret" && m.Head.Version == "v1" { + fmt.Fprintf(b, "---\n# Source: %s\n# HIDDEN: The Secret output has been suppressed\n", m.Name) + } else { + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content) + } + } else { + newDir := outputDir + if useReleaseName { + newDir = filepath.Join(outputDir, releaseName) + } + // NOTE: We do not have to worry about the post-renderer because + // output dir is only used by `helm template`. 
In the next major + // release, we should move this logic to template only as it is not + // used by install or upgrade + err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name]) + if err != nil { + return hs, b, "", err + } + fileWritten[m.Name] = true + } + } + + if pr != nil { + b, err = pr.Run(b) + if err != nil { + return hs, b, notes, errors.Wrap(err, "error while running post render on files") + } + } + + return hs, b, notes, nil +} + +// RESTClientGetter gets the rest client +type RESTClientGetter interface { + ToRESTConfig() (*rest.Config, error) + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + ToRESTMapper() (meta.RESTMapper, error) +} + +// DebugLog sets the logger that writes debug strings +type DebugLog func(format string, v ...interface{}) + +// capabilities builds a Capabilities from discovery information. +func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { + if cfg.Capabilities != nil { + return cfg.Capabilities, nil + } + dc, err := cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return nil, errors.Wrap(err, "could not get Kubernetes discovery client") + } + // force a discovery cache invalidation to always fetch the latest server version/capabilities. + dc.Invalidate() + kubeVersion, err := dc.ServerVersion() + if err != nil { + return nil, errors.Wrap(err, "could not get server version from Kubernetes") + } + // Issue #6361: + // Client-Go emits an error when an API service is registered but unimplemented. + // We trap that error here and print a warning. But since the discovery client continues + // building the API object, it is correctly populated with all valid APIs. + // See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642 + apiVersions, err := GetVersionSet(dc) + if err != nil { + if discovery.IsGroupDiscoveryFailedError(err) { + cfg.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err) + cfg.Log("WARNING: To fix this, kubectl delete apiservice ") + } else { + return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") + } + } + + cfg.Capabilities = &chartutil.Capabilities{ + APIVersions: apiVersions, + KubeVersion: chartutil.KubeVersion{ + Version: kubeVersion.GitVersion, + Major: kubeVersion.Major, + Minor: kubeVersion.Minor, + }, + HelmVersion: chartutil.DefaultCapabilities.HelmVersion, + } + return cfg.Capabilities, nil +} + +// KubernetesClientSet creates a new kubernetes ClientSet based on the configuration +func (cfg *Configuration) KubernetesClientSet() (kubernetes.Interface, error) { + conf, err := cfg.RESTClientGetter.ToRESTConfig() + if err != nil { + return nil, errors.Wrap(err, "unable to generate config for kubernetes client") + } + + return kubernetes.NewForConfig(conf) +} + +// Now generates a timestamp +// +// If the configuration has a Timestamper on it, that will be used. +// Otherwise, this will use time.Now(). 
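+//
+// A sketch of pinning timestamps in tests (assumes `time` is
+// helm.sh/helm/v3/pkg/time, as imported above; the captured value is
+// illustrative):
+//
+//	fixed := time.Now()
+//	action.Timestamper = func() time.Time { return fixed }
+//	defer func() { action.Timestamper = time.Now }()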
+func (cfg *Configuration) Now() time.Time { + return Timestamper() +} + +func (cfg *Configuration) releaseContent(name string, version int) (*release.Release, error) { + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name) + } + + if version <= 0 { + return cfg.Releases.Last(name) + } + + return cfg.Releases.Get(name, version) +} + +// GetVersionSet retrieves a set of available k8s API versions +func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) { + groups, resources, err := client.ServerGroupsAndResources() + if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { + return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes") + } + + // FIXME: The Kubernetes test fixture for cli appears to always return nil + // for calls to Discovery().ServerGroupsAndResources(). So in this case, we + // return the default API list. This is also a safe value to return in any + // other odd-ball case. + if len(groups) == 0 && len(resources) == 0 { + return chartutil.DefaultVersionSet, nil + } + + versionMap := make(map[string]interface{}) + var versions []string + + // Extract the groups + for _, g := range groups { + for _, gv := range g.Versions { + versionMap[gv.GroupVersion] = struct{}{} + } + } + + // Extract the resources + var id string + var ok bool + for _, r := range resources { + for _, rl := range r.APIResources { + + // A Kind at a GroupVersion can show up more than once. We only want + // it displayed once in the final output. + id = path.Join(r.GroupVersion, rl.Kind) + if _, ok = versionMap[id]; !ok { + versionMap[id] = struct{}{} + } + } + } + + // Convert to a form that NewVersionSet can use + for k := range versionMap { + versions = append(versions, k) + } + + return chartutil.VersionSet(versions), nil +} + +// recordRelease with an update operation in case reuse has been set. +func (cfg *Configuration) recordRelease(r *release.Release) { + if err := cfg.Releases.Update(r); err != nil { + cfg.Log("warning: Failed to update release %s: %s", r.Name, err) + } +} + +// Init initializes the action configuration +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { + kc := kube.New(getter) + kc.Log = log + + lazyClient := &lazyClient{ + namespace: namespace, + clientFn: kc.Factory.KubernetesClientSet, + } + + var store *storage.Storage + switch helmDriver { + case "secret", "secrets", "": + d := driver.NewSecrets(newSecretClient(lazyClient)) + d.Log = log + store = storage.Init(d) + case "configmap", "configmaps": + d := driver.NewConfigMaps(newConfigMapClient(lazyClient)) + d.Log = log + store = storage.Init(d) + case "memory": + var d *driver.Memory + if cfg.Releases != nil { + if mem, ok := cfg.Releases.Driver.(*driver.Memory); ok { + // This function can be called more than once (e.g., helm list --all-namespaces). + // If a memory driver was already initialized, re-use it but set the possibly new namespace. + // We re-use it in case some releases where already created in the existing memory driver. 
+				d = mem
+			}
+		}
+		if d == nil {
+			d = driver.NewMemory()
+		}
+		d.SetNamespace(namespace)
+		store = storage.Init(d)
+	case "sql":
+		d, err := driver.NewSQL(
+			os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
+			log,
+			namespace,
+		)
+		if err != nil {
+			return errors.Wrap(err, "unable to instantiate SQL driver")
+		}
+		store = storage.Init(d)
+	default:
+		return errors.Errorf("unknown driver %q", helmDriver)
+	}
+
+	cfg.RESTClientGetter = getter
+	cfg.KubeClient = kc
+	cfg.Releases = store
+	cfg.Log = log
+	cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
+
+	return nil
+}
+
+// SetHookOutputFunc sets the HookOutputFunc on the Configuration.
+func (cfg *Configuration) SetHookOutputFunc(hookOutputFunc func(_, _, _ string) io.Writer) {
+	cfg.HookOutputFunc = hookOutputFunc
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/dependency.go b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
new file mode 100644
index 000000000000..19305fee8807
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
@@ -0,0 +1,237 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Masterminds/semver/v3"
+	"github.com/gosuri/uitable"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// Dependency is the action for building a given chart's dependency tree.
+//
+// It provides the implementation of 'helm dependency' and its respective subcommands.
+type Dependency struct {
+	Verify                bool
+	Keyring               string
+	SkipRefresh           bool
+	ColumnWidth           uint
+	Username              string
+	Password              string
+	CertFile              string
+	KeyFile               string
+	CaFile                string
+	InsecureSkipTLSverify bool
+	PlainHTTP             bool
+}
+
+// NewDependency creates a new Dependency object with the given configuration.
+func NewDependency() *Dependency {
+	return &Dependency{
+		ColumnWidth: 80,
+	}
+}
+
+// List executes 'helm dependency list'.
+func (d *Dependency) List(chartpath string, out io.Writer) error {
+	c, err := loader.Load(chartpath)
+	if err != nil {
+		return err
+	}
+
+	if c.Metadata.Dependencies == nil {
+		fmt.Fprintf(out, "WARNING: no dependencies at %s\n", filepath.Join(chartpath, "charts"))
+		return nil
+	}
+
+	d.printDependencies(chartpath, out, c)
+	fmt.Fprintln(out)
+	d.printMissing(chartpath, out, c.Metadata.Dependencies)
+	return nil
+}
+
+// dependencyStatus returns a string describing the status of a dependency vis-à-vis the parent chart.
+func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, parent *chart.Chart) string {
+	filename := fmt.Sprintf("%s-%s.tgz", dep.Name, "*")
+
+	// If a chart is unpacked, this will check the unpacked chart's `charts/` directory for tarballs.
+	// Technically, this is COMPLETELY unnecessary, and should be removed in Helm 4. It is here
+	// to preserve backward compatibility.
In Helm 2/3, there is a "difference" between + // the tgz version (which outputs "ok" if it unpacks) and the loaded version (which outputs + // "unpacked"). Early in Helm 2's history, this would have made a difference. But it no + // longer does. However, since this code shipped with Helm 3, the output must remain stable + // until Helm 4. + switch archives, err := filepath.Glob(filepath.Join(chartpath, "charts", filename)); { + case err != nil: + return "bad pattern" + case len(archives) > 1: + // See if the second part is a SemVer + found := []string{} + for _, arc := range archives { + // we need to trip the prefix dirs and the extension off. + filename = strings.TrimSuffix(filepath.Base(arc), ".tgz") + maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name)) + + if _, err := semver.StrictNewVersion(maybeVersion); err == nil { + // If the version parsed without an error, it is possibly a valid + // version. + found = append(found, arc) + } + } + + if l := len(found); l == 1 { + // If we get here, we do the same thing as in len(archives) == 1. + if r := statArchiveForStatus(found[0], dep); r != "" { + return r + } + + // Fall through and look for directories + } else if l > 1 { + return "too many matches" + } + + // The sanest thing to do here is to fall through and see if we have any directory + // matches. + + case len(archives) == 1: + archive := archives[0] + if r := statArchiveForStatus(archive, dep); r != "" { + return r + } + + } + // End unnecessary code. + + var depChart *chart.Chart + for _, item := range parent.Dependencies() { + if item.Name() == dep.Name { + depChart = item + } + } + + if depChart == nil { + return "missing" + } + + if depChart.Metadata.Version != dep.Version { + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return "invalid version" + } + + v, err := semver.NewVersion(depChart.Metadata.Version) + if err != nil { + return "invalid version" + } + + if !constraint.Check(v) { + return "wrong version" + } + } + + return "unpacked" +} + +// stat an archive and return a message if the stat is successful +// +// This is a refactor of the code originally in dependencyStatus. It is here to +// support legacy behavior, and should be removed in Helm 4. +func statArchiveForStatus(archive string, dep *chart.Dependency) string { + if _, err := os.Stat(archive); err == nil { + c, err := loader.Load(archive) + if err != nil { + return "corrupt" + } + if c.Name() != dep.Name { + return "misnamed" + } + + if c.Metadata.Version != dep.Version { + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return "invalid version" + } + + v, err := semver.NewVersion(c.Metadata.Version) + if err != nil { + return "invalid version" + } + + if !constraint.Check(v) { + return "wrong version" + } + } + return "ok" + } + return "" +} + +// printDependencies prints all of the dependencies in the yaml file. +func (d *Dependency) printDependencies(chartpath string, out io.Writer, c *chart.Chart) { + table := uitable.New() + table.MaxColWidth = d.ColumnWidth + table.AddRow("NAME", "VERSION", "REPOSITORY", "STATUS") + for _, row := range c.Metadata.Dependencies { + table.AddRow(row.Name, row.Version, row.Repository, d.dependencyStatus(chartpath, row, c)) + } + fmt.Fprintln(out, table) +} + +// printMissing prints warnings about charts that are present on disk, but are +// not in Chart.yaml. 
+func (d *Dependency) printMissing(chartpath string, out io.Writer, reqs []*chart.Dependency) {
+	folder := filepath.Join(chartpath, "charts/*")
+	files, err := filepath.Glob(folder)
+	if err != nil {
+		fmt.Fprintln(out, err)
+		return
+	}
+
+	for _, f := range files {
+		fi, err := os.Stat(f)
+		if err != nil {
+			fmt.Fprintf(out, "Warning: %s\n", err)
+			// fi is nil when Stat fails, so skip this entry rather than
+			// dereference it below.
+			continue
+		}
+		// Skip anything that is not a directory and not a tgz file.
+		if !fi.IsDir() && filepath.Ext(f) != ".tgz" {
+			continue
+		}
+		c, err := loader.Load(f)
+		if err != nil {
+			fmt.Fprintf(out, "WARNING: %q is not a chart.\n", f)
+			continue
+		}
+		found := false
+		for _, d := range reqs {
+			if d.Name == c.Name() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			fmt.Fprintf(out, "WARNING: %q is not in Chart.yaml.\n", f)
+		}
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/doc.go b/vendor/helm.sh/helm/v3/pkg/action/doc.go
new file mode 100644
index 000000000000..3c91bd61848c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package action contains the logic for each action that Helm can perform.
+//
+// This is a library for calling top-level Helm actions like 'install',
+// 'upgrade', or 'list'. Actions approximately match the command line
+// invocations that the Helm client uses.
+package action
diff --git a/vendor/helm.sh/helm/v3/pkg/action/get.go b/vendor/helm.sh/helm/v3/pkg/action/get.go
new file mode 100644
index 000000000000..f44b53307fb6
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/get.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// Get is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm get' and its respective subcommands (except `helm get values`).
+type Get struct {
+	cfg *Configuration
+
+	// Initializing Version to 0 will get the latest revision of the release.
+	Version int
+}
+
+// NewGet creates a new Get object with the given configuration.
+func NewGet(cfg *Configuration) *Get {
+	return &Get{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm get' against the given release.
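+//
+// A minimal sketch (assumes cfg is an initialized *action.Configuration and
+// "my-release" is illustrative):
+//
+//	rel, err := action.NewGet(cfg).Run("my-release")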
+func (g *Get) Run(name string) (*release.Release, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + return g.cfg.releaseContent(name, g.Version) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go b/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go new file mode 100644 index 000000000000..f79788c3bbc0 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go @@ -0,0 +1,90 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "sort" + "strings" + "time" + + "helm.sh/helm/v3/pkg/chart" +) + +// GetMetadata is the action for checking a given release's metadata. +// +// It provides the implementation of 'helm get metadata'. +type GetMetadata struct { + cfg *Configuration + + Version int +} + +type Metadata struct { + Name string `json:"name" yaml:"name"` + Chart string `json:"chart" yaml:"chart"` + Version string `json:"version" yaml:"version"` + AppVersion string `json:"appVersion" yaml:"appVersion"` + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + Dependencies []*chart.Dependency `json:"dependencies,omitempty" yaml:"dependencies,omitempty"` + Namespace string `json:"namespace" yaml:"namespace"` + Revision int `json:"revision" yaml:"revision"` + Status string `json:"status" yaml:"status"` + DeployedAt string `json:"deployedAt" yaml:"deployedAt"` +} + +// NewGetMetadata creates a new GetMetadata object with the given configuration. +func NewGetMetadata(cfg *Configuration) *GetMetadata { + return &GetMetadata{ + cfg: cfg, + } +} + +// Run executes 'helm get metadata' against the given release. +func (g *GetMetadata) Run(name string) (*Metadata, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + rel, err := g.cfg.releaseContent(name, g.Version) + if err != nil { + return nil, err + } + + return &Metadata{ + Name: rel.Name, + Chart: rel.Chart.Metadata.Name, + Version: rel.Chart.Metadata.Version, + AppVersion: rel.Chart.Metadata.AppVersion, + Dependencies: rel.Chart.Metadata.Dependencies, + Annotations: rel.Chart.Metadata.Annotations, + Namespace: rel.Namespace, + Revision: rel.Version, + Status: rel.Info.Status.String(), + DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339), + }, nil +} + +// FormattedDepNames formats metadata.dependencies names into a comma-separated list. +func (m *Metadata) FormattedDepNames() string { + depsNames := make([]string, 0, len(m.Dependencies)) + for _, dep := range m.Dependencies { + depsNames = append(depsNames, dep.Name) + } + sort.StringSlice(depsNames).Sort() + + return strings.Join(depsNames, ",") +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_values.go b/vendor/helm.sh/helm/v3/pkg/action/get_values.go new file mode 100644 index 000000000000..9c32db213281 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get_values.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "helm.sh/helm/v3/pkg/chartutil" +) + +// GetValues is the action for checking a given release's values. +// +// It provides the implementation of 'helm get values'. +type GetValues struct { + cfg *Configuration + + Version int + AllValues bool +} + +// NewGetValues creates a new GetValues object with the given configuration. +func NewGetValues(cfg *Configuration) *GetValues { + return &GetValues{ + cfg: cfg, + } +} + +// Run executes 'helm get values' against the given release. +func (g *GetValues) Run(name string) (map[string]interface{}, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + rel, err := g.cfg.releaseContent(name, g.Version) + if err != nil { + return nil, err + } + + // If the user wants all values, compute the values and return. + if g.AllValues { + cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config) + if err != nil { + return nil, err + } + return cfg, nil + } + return rel.Config, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/history.go b/vendor/helm.sh/helm/v3/pkg/action/history.go new file mode 100644 index 000000000000..0430aaf7a5d5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/history.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +// History is the action for checking the release's ledger. +// +// It provides the implementation of 'helm history'. +// It returns all the revisions for a specific release. +// To list up to one revision of every release in one specific, or in all, +// namespaces, see the List action. +type History struct { + cfg *Configuration + + Max int + Version int +} + +// NewHistory creates a new History object with the given configuration. +func NewHistory(cfg *Configuration) *History { + return &History{ + cfg: cfg, + } +} + +// Run executes 'helm history' against the given release. 
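+//
+// A minimal sketch (assumes cfg is an initialized *action.Configuration and
+// "my-release" is illustrative):
+//
+//	revs, err := action.NewHistory(cfg).Run("my-release")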
+func (h *History) Run(name string) ([]*release.Release, error) {
+	if err := h.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("release name is invalid: %s", name)
+	}
+
+	h.cfg.Log("getting history for release %s", name)
+	return h.cfg.Releases.History(name)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
new file mode 100644
index 000000000000..16cc13bdd581
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
@@ -0,0 +1,230 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"slices"
+	"sort"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v3"
+
+	"helm.sh/helm/v3/pkg/kube"
+	"helm.sh/helm/v3/pkg/release"
+	helmtime "helm.sh/helm/v3/pkg/time"
+)
+
+// execHook executes all of the hooks for the given hook event.
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+	executingHooks := []*release.Hook{}
+
+	for _, h := range rl.Hooks {
+		for _, e := range h.Events {
+			if e == hook {
+				executingHooks = append(executingHooks, h)
+			}
+		}
+	}
+
+	// hooks are pre-ordered by kind, so keep order stable
+	sort.Stable(hookByWeight(executingHooks))
+
+	for _, h := range executingHooks {
+		// Set default delete policy to before-hook-creation
+		if len(h.DeletePolicies) == 0 {
+			// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
+			// resources. For all other resource types update in place if a
+			// resource with the same name already exists and is owned by the
+			// current release.
+			h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
+		}
+
+		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil {
+			return err
+		}
+
+		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
+		if err != nil {
+			return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+		}
+
+		// Record the time at which the hook was applied to the cluster
+		h.LastRun = release.HookExecution{
+			StartedAt: helmtime.Now(),
+			Phase:     release.HookPhaseRunning,
+		}
+		cfg.recordRelease(rl)
+
+		// As long as the implementation of WatchUntilReady does not panic, HookPhaseFailed or HookPhaseSucceeded
+		// should always be set by this function. If we fail to do that for any reason, then HookPhaseUnknown is
+		// the most appropriate value to surface.
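+		// Phase lifecycle implemented below: Running -> Unknown (pessimistic
+		// default) -> Failed on create/watch error, or Succeeded otherwise.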
+ h.LastRun.Phase = release.HookPhaseUnknown + + // Create hook resources + if _, err := cfg.KubeClient.Create(resources); err != nil { + h.LastRun.CompletedAt = helmtime.Now() + h.LastRun.Phase = release.HookPhaseFailed + return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path) + } + + // Watch hook resources until they have completed + err = cfg.KubeClient.WatchUntilReady(resources, timeout) + // Note the time of success/failure + h.LastRun.CompletedAt = helmtime.Now() + // Mark hook as succeeded or failed + if err != nil { + h.LastRun.Phase = release.HookPhaseFailed + // If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side + if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error outputting logs for hook failure: %v", errOutputting) + } + // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted + // under failed condition. If so, then clear the corresponding resource object in the hook + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) + } + return err + } + h.LastRun.Phase = release.HookPhaseSucceeded + } + + // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook + for i := len(executingHooks) - 1; i >= 0; i-- { + h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. + log.Printf("error outputting logs for hook failure: %v", err) + } + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { + return err + } + } + + return nil +} + +// hookByWeight is a sorter for hooks +type hookByWeight []*release.Hook + +func (x hookByWeight) Len() int { return len(x) } +func (x hookByWeight) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x hookByWeight) Less(i, j int) bool { + if x[i].Weight == x[j].Weight { + return x[i].Name < x[j].Name + } + return x[i].Weight < x[j].Weight +} + +// deleteHookByPolicy deletes a hook if the hook policy instructs it to +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { + // Never delete CustomResourceDefinitions; this could cause lots of + // cascading garbage collection. 
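+	// For orientation: DeletePolicies originate from the chart's hook
+	// annotations, e.g. `"helm.sh/hook-delete-policy": hook-succeeded`,
+	// which this function matches against the policy for the current phase.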
+ if h.Kind == "CustomResourceDefinition" { + return nil + } + if hookHasDeletePolicy(h, policy) { + resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false) + if err != nil { + return errors.Wrapf(err, "unable to build kubernetes object for deleting hook %s", h.Path) + } + _, errs := cfg.KubeClient.Delete(resources) + if len(errs) > 0 { + return errors.New(joinErrors(errs)) + } + + // wait for resources until they are deleted to avoid conflicts + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(resources, timeout); err != nil { + return err + } + } + } + return nil +} + +// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices +// supported by helm. If so, mark the hook as one should be deleted. +func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool { + for _, v := range h.DeletePolicies { + if policy == v { + return true + } + } + return false +} + +// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to +func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error { + if !hookHasOutputLogPolicy(h, policy) { + return nil + } + namespace, err := cfg.deriveNamespace(h, releaseNamespace) + if err != nil { + return err + } + switch h.Kind { + case "Job": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)}) + case "Pod": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)}) + default: + return nil + } +} + +func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error { + // TODO Helm 4: Remove this check when GetPodList and OutputContainerLogsForPodList are moved from InterfaceLogs to Interface + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceLogs); ok { + podList, err := kubeClient.GetPodList(namespace, listOptions) + if err != nil { + return err + } + err = kubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc) + return err + } + return nil +} + +func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) { + tmp := struct { + Metadata struct { + Namespace string + } + }{} + err := yaml.Unmarshal([]byte(h.Manifest), &tmp) + if err != nil { + return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path) + } + if tmp.Metadata.Namespace == "" { + return namespace, nil + } + return tmp.Metadata.Namespace, nil +} + +// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies +// supported by helm. +func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool { + return slices.Contains(h.OutputLogPolicies, policy) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go new file mode 100644 index 000000000000..7bdfc2ab5288 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -0,0 +1,836 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "context" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "sync" + "text/template" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/resource" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/downloader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/kube" + kubefake "helm.sh/helm/v3/pkg/kube/fake" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/repo" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// notesFileSuffix that we want to treat special. It goes through the templating engine +// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually +// wants to see this file after rendering in the status command. However, it must be a suffix +// since there can be filepath in front of it. +const notesFileSuffix = "NOTES.txt" + +const defaultDirectoryPermission = 0755 + +// Install performs an installation operation. +type Install struct { + cfg *Configuration + + ChartPathOptions + + ClientOnly bool + Force bool + CreateNamespace bool + DryRun bool + DryRunOption string + // HideSecret can be set to true when DryRun is enabled in order to hide + // Kubernetes Secrets in the output. It cannot be used outside of DryRun. + HideSecret bool + DisableHooks bool + Replace bool + Wait bool + WaitForJobs bool + Devel bool + DependencyUpdate bool + Timeout time.Duration + Namespace string + ReleaseName string + GenerateName bool + NameTemplate string + Description string + OutputDir string + Atomic bool + SkipCRDs bool + SubNotes bool + HideNotes bool + SkipSchemaValidation bool + DisableOpenAPIValidation bool + IncludeCRDs bool + Labels map[string]string + // KubeVersion allows specifying a custom kubernetes version to use and + // APIVersions allows a manual set of supported API Versions to be passed + // (for things like templating). These are ignored if ClientOnly is false + KubeVersion *chartutil.KubeVersion + APIVersions chartutil.VersionSet + // Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false + IsUpgrade bool + // Enable DNS lookups when rendering templates + EnableDNS bool + // Used by helm template to add the release as part of OutputDir path + // OutputDir/ + UseReleaseName bool + // TakeOwnership will ignore the check for helm annotations and take ownership of the resources. 
+ TakeOwnership bool + PostRenderer postrender.PostRenderer + // Lock to control raceconditions when the process receives a SIGTERM + Lock sync.Mutex +} + +// ChartPathOptions captures common options used for controlling chart paths +type ChartPathOptions struct { + CaFile string // --ca-file + CertFile string // --cert-file + KeyFile string // --key-file + InsecureSkipTLSverify bool // --insecure-skip-verify + PlainHTTP bool // --plain-http + Keyring string // --keyring + Password string // --password + PassCredentialsAll bool // --pass-credentials + RepoURL string // --repo + Username string // --username + Verify bool // --verify + Version string // --version + + // registryClient provides a registry client but is not added with + // options from a flag + registryClient *registry.Client +} + +// NewInstall creates a new Install object with the given configuration. +func NewInstall(cfg *Configuration) *Install { + in := &Install{ + cfg: cfg, + } + in.ChartPathOptions.registryClient = cfg.RegistryClient + + return in +} + +// SetRegistryClient sets the registry client for the install action +func (i *Install) SetRegistryClient(registryClient *registry.Client) { + i.ChartPathOptions.registryClient = registryClient +} + +// GetRegistryClient get the registry client. +func (i *Install) GetRegistryClient() *registry.Client { + return i.ChartPathOptions.registryClient +} + +func (i *Install) installCRDs(crds []chart.CRD) error { + // We do these one file at a time in the order they were read. + totalItems := []*resource.Info{} + for _, obj := range crds { + // Read in the resources + res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false) + if err != nil { + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + + // Send them to Kube + if _, err := i.cfg.KubeClient.Create(res); err != nil { + // If the error is CRD already exists, continue. + if apierrors.IsAlreadyExists(err) { + crdName := res[0].Name + i.cfg.Log("CRD %s is already present. Skipping.", crdName) + continue + } + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + totalItems = append(totalItems, res...) + } + if len(totalItems) > 0 { + // Give time for the CRD to be recognized. + if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { + return err + } + + // If we have already gathered the capabilities, we need to invalidate + // the cache so that the new CRDs are recognized. This should only be + // the case when an action configuration is reused for multiple actions, + // as otherwise it is later loaded by ourselves when getCapabilities + // is called later on in the installation process. + if i.cfg.Capabilities != nil { + discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return err + } + + i.cfg.Log("Clearing discovery cache") + discoveryClient.Invalidate() + + _, _ = discoveryClient.ServerGroups() + } + + // Invalidate the REST mapper, since it will not have the new CRDs + // present. 
+ restMapper, err := i.cfg.RESTClientGetter.ToRESTMapper()
+ if err != nil {
+ return err
+ }
+ if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
+ i.cfg.Log("Clearing REST mapper cache")
+ resettable.Reset()
+ }
+ }
+ return nil
+}
+
+// Run executes the installation
+//
+// If DryRun is set to true, this will prepare the release, but not install it.
+func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ ctx := context.Background()
+ return i.RunWithContext(ctx, chrt, vals)
+}
+
+// RunWithContext executes the installation with the given context.
+//
+// When the task is cancelled through ctx, the function returns and the install
+// proceeds in the background.
+func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
+ if !i.ClientOnly {
+ if err := i.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+ }
+
+ // HideSecret must be used with dry run. Otherwise, return an error.
+ if !i.isDryRun() && i.HideSecret {
+ return nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode")
+ }
+
+ if err := i.availableName(); err != nil {
+ return nil, err
+ }
+
+ if err := chartutil.ProcessDependenciesWithMerge(chrt, vals); err != nil {
+ return nil, err
+ }
+
+ var interactWithRemote bool
+ if !i.isDryRun() || i.DryRunOption == "server" || i.DryRunOption == "none" || i.DryRunOption == "false" {
+ interactWithRemote = true
+ }
+
+ // Pre-install anything in the crd/ directory. We do this before Helm
+ // contacts the upstream server and builds the capabilities object.
+ if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
+ // On dry run, bail here
+ if i.isDryRun() {
+ i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
+ } else if err := i.installCRDs(crds); err != nil {
+ return nil, err
+ }
+ }
+
+ if i.ClientOnly {
+ // Add mock objects in here so it doesn't use Kube API server
+ // NOTE(bacongobbler): used for `helm template`
+ i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy()
+ if i.KubeVersion != nil {
+ i.cfg.Capabilities.KubeVersion = *i.KubeVersion
+ }
+ i.cfg.Capabilities.APIVersions = append(i.cfg.Capabilities.APIVersions, i.APIVersions...)
+ i.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard}
+
+ mem := driver.NewMemory()
+ mem.SetNamespace(i.Namespace)
+ i.cfg.Releases = storage.Init(mem)
+ } else if !i.ClientOnly && len(i.APIVersions) > 0 {
+ i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
+ }
+
+ // Make sure if Atomic is set, that wait is set as well. This makes it so
+ // the user doesn't have to specify both
+ i.Wait = i.Wait || i.Atomic
+
+ caps, err := i.cfg.getCapabilities()
+ if err != nil {
+ return nil, err
+ }
+
+ // special case for helm template --is-upgrade
+ isUpgrade := i.IsUpgrade && i.isDryRun()
+ options := chartutil.ReleaseOptions{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Revision: 1,
+ IsInstall: !isUpgrade,
+ IsUpgrade: isUpgrade,
+ }
+ valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation)
+ if err != nil {
+ return nil, err
+ }
+
+ if driver.ContainsSystemLabels(i.Labels) {
+ return nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ }
+
+ rel := i.createRelease(chrt, vals, i.Labels)
+
+ var manifestDoc *bytes.Buffer
+ rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, interactWithRemote, i.EnableDNS, i.HideSecret)
+ // Even for errors, attach this if available
+ if manifestDoc != nil {
+ rel.Manifest = manifestDoc.String()
+ }
+ // Check error from render
+ if err != nil {
+ rel.SetStatus(release.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error()))
+ // Return a release with partial data so that the client can show debugging information.
+ return rel, err
+ }
+
+ // Mark this release as in-progress
+ rel.SetStatus(release.StatusPendingInstall, "Initial install underway")
+
+ var toBeAdopted kube.ResourceList
+ resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to build kubernetes objects from release manifest")
+ }
+
+ // It is safe to use "force" here because these are resources currently rendered by the chart.
+ err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true))
+ if err != nil {
+ return nil, err
+ }
+
+ // Install requires an extra validation step of checking that resources
+ // don't already exist before we actually create resources. If we continue
+ // forward and create the release object with resources that already exist,
+ // we'll end up in a state where we will delete those resources upon
+ // deleting the release because the manifest will be pointing at that
+ // resource
+ if !i.ClientOnly && !isUpgrade && len(resources) > 0 {
+ if i.TakeOwnership {
+ toBeAdopted, err = requireAdoption(resources)
+ } else {
+ toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+ }
+ if err != nil {
+ return nil, errors.Wrap(err, "Unable to continue with install")
+ }
+ }
+
+ // Bail out here if it is a dry run
+ if i.isDryRun() {
+ rel.Info.Description = "Dry run complete"
+ return rel, nil
+ }
+
+ if i.CreateNamespace {
+ ns := &v1.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Namespace",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: i.Namespace,
+ Labels: map[string]string{
+ "name": i.Namespace,
+ },
+ },
+ }
+ buf, err := yaml.Marshal(ns)
+ if err != nil {
+ return nil, err
+ }
+ resourceList, err := i.cfg.KubeClient.Build(bytes.NewBuffer(buf), true)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) {
+ return nil, err
+ }
+ }
+
+ // If Replace is true, we need to supersede the last release.
+ if i.Replace {
+ if err := i.replaceRelease(rel); err != nil {
+ return nil, err
+ }
+ }
+
+ // Store the release in history before continuing (new in Helm 3). We always know
+ // that this is a create operation.
+ if err := i.cfg.Releases.Create(rel); err != nil {
+ // We could try to recover gracefully here, but since nothing has been installed
+ // yet, this is probably safer than trying to continue when we know storage is
+ // not working.
+ return rel, err
+ }
+
+ rel, err = i.performInstallCtx(ctx, rel, toBeAdopted, resources)
+ if err != nil {
+ rel, err = i.failRelease(rel, err)
+ }
+ return rel, err
+}
+
+func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ type Msg struct {
+ r *release.Release
+ e error
+ }
+ resultChan := make(chan Msg, 1)
+
+ go func() {
+ rel, err := i.performInstall(rel, toBeAdopted, resources)
+ resultChan <- Msg{rel, err}
+ }()
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ return rel, err
+ case msg := <-resultChan:
+ return msg.r, msg.e
+ }
+}
+
+// isDryRun returns true if the install is set to run as a dry run
+func (i *Install) isDryRun() bool {
+ if i.DryRun || i.DryRunOption == "client" || i.DryRunOption == "server" || i.DryRunOption == "true" {
+ return true
+ }
+ return false
+}
+
+func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ var err error
+ // pre-install hooks
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+ return rel, fmt.Errorf("failed pre-install: %s", err)
+ }
+ }
+
+ // At this point, we can do the install. Note that before we were detecting whether to
+ // do an update, but it's not clear whether we WANT to do an update if the re-use is set
+ // to true, since that is basically an upgrade operation.
+ if len(toBeAdopted) == 0 && len(resources) > 0 {
+ _, err = i.cfg.KubeClient.Create(resources)
+ } else if len(resources) > 0 {
+ if i.TakeOwnership {
+ _, err = i.cfg.KubeClient.(kube.InterfaceThreeWayMerge).UpdateThreeWayMerge(toBeAdopted, resources, i.Force)
+ } else {
+ _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.Force)
+ }
+ }
+ if err != nil {
+ return rel, err
+ }
+
+ if i.Wait {
+ if i.WaitForJobs {
+ err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout)
+ } else {
+ err = i.cfg.KubeClient.Wait(resources, i.Timeout)
+ }
+ if err != nil {
+ return rel, err
+ }
+ }
+
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+ return rel, fmt.Errorf("failed post-install: %s", err)
+ }
+ }
+
+ if len(i.Description) > 0 {
+ rel.SetStatus(release.StatusDeployed, i.Description)
+ } else {
+ rel.SetStatus(release.StatusDeployed, "Install complete")
+ }
+
+ // This is a tricky case. The release has been created, but the result
+ // cannot be recorded. The truest thing to tell the user is that the
+ // release was created. However, the user will not be able to do anything
+ // further with this release.
+ //
+ // One possible strategy would be to do a timed retry to see if we can get
+ // this stored in the future.
+ if err := i.recordRelease(rel); err != nil {
+ i.cfg.Log("failed to record the release: %s", err)
+ }
+
+ return rel, nil
+}
+
+func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
+ rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
+ if i.Atomic {
+ i.cfg.Log("Install failed and atomic is set, uninstalling release")
+ uninstall := NewUninstall(i.cfg)
+ uninstall.DisableHooks = i.DisableHooks
+ uninstall.KeepHistory = false
+ uninstall.Timeout = i.Timeout
+ if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil {
+ return rel, errors.Wrapf(uninstallErr, "an error occurred while uninstalling the release. original install error: %s", err)
+ }
+ return rel, errors.Wrapf(err, "release %s failed, and has been uninstalled due to atomic being set", i.ReleaseName)
+ }
+ i.recordRelease(rel) // Ignore the error, since we have another error to deal with.
+ return rel, err
+}
+
+// availableName tests whether a name is available
+//
+// Roughly, this will return an error if name is
+//
+// - empty
+// - too long
+// - already in use, and not deleted
+// - used by a deleted release, and i.Replace is false
+func (i *Install) availableName() error {
+ start := i.ReleaseName
+
+ if err := chartutil.ValidateReleaseName(start); err != nil {
+ return errors.Wrapf(err, "release name %q", start)
+ }
+ // On dry run, bail here
+ if i.isDryRun() {
+ return nil
+ }
+
+ h, err := i.cfg.Releases.History(start)
+ if err != nil || len(h) < 1 {
+ return nil
+ }
+ releaseutil.Reverse(h, releaseutil.SortByRevision)
+ rel := h[0]
+
+ if st := rel.Info.Status; i.Replace && (st == release.StatusUninstalled || st == release.StatusFailed) {
+ return nil
+ }
+ return errors.New("cannot re-use a name that is still in use")
+}
+
+// createRelease creates a new release object
+func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release {
+ ts := i.cfg.Now()
+ return &release.Release{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Chart: chrt,
+ Config: rawVals,
+ Info: &release.Info{
+ FirstDeployed: ts,
+ LastDeployed: ts,
+ Status: release.StatusUnknown,
+ },
+ Version: 1,
+ Labels: labels,
+ }
+}
+
+// recordRelease stores the release with an update operation in case reuse has been set.
+func (i *Install) recordRelease(r *release.Release) error {
+ // This is a legacy function which has been reduced to a one-liner. Could probably
+ // refactor it out.
+ return i.cfg.Releases.Update(r)
+}
+
+// replaceRelease replaces an older release with this one
+//
+// This allows us to re-use names by superseding an existing release with a new one
+func (i *Install) replaceRelease(rel *release.Release) error {
+ hist, err := i.cfg.Releases.History(rel.Name)
+ if err != nil || len(hist) == 0 {
+ // No releases exist for this name, so we can return early
+ return nil
+ }
+
+ releaseutil.Reverse(hist, releaseutil.SortByRevision)
+ last := hist[0]
+
+ // Update version to the next available
+ rel.Version = last.Version + 1
+
+ // Do not change the status of a failed release.
+ if last.Info.Status == release.StatusFailed {
+ return nil
+ }
+
+ // For any other status, mark it as superseded and store the old record
+ last.SetStatus(release.StatusSuperseded, "superseded by new release")
+ return i.recordRelease(last)
+}
+
+// write the <data> to <output-dir>/<name>. <append> controls if the file is created or content will be appended
+func writeToFile(outputDir string, name string, data string, append bool) error {
+ outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+ err := ensureDirectoryForFile(outfileName)
+ if err != nil {
+ return err
+ }
+
+ f, err := createOrOpenFile(outfileName, append)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("wrote %s\n", outfileName)
+ return nil
+}
+
+func createOrOpenFile(filename string, append bool) (*os.File, error) {
+ if append {
+ return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+ }
+ return os.Create(filename)
+}
+
+// check if the directory exists to create the file; creates it if it doesn't exist
+func ensureDirectoryForFile(file string) error {
+ baseDir := path.Dir(file)
+ _, err := os.Stat(baseDir)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ return os.MkdirAll(baseDir, defaultDirectoryPermission)
+}
+
+// NameAndChart returns the name and chart that should be used.
+//
+// This will read the flags and handle name generation if necessary.
+func (i *Install) NameAndChart(args []string) (string, string, error) {
+ flagsNotSet := func() error {
+ if i.GenerateName {
+ return errors.New("cannot set --generate-name and also specify a name")
+ }
+ if i.NameTemplate != "" {
+ return errors.New("cannot set --name-template and also specify a name")
+ }
+ return nil
+ }
+
+ if len(args) > 2 {
+ return args[0], args[1], errors.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+ }
+
+ if len(args) == 2 {
+ return args[0], args[1], flagsNotSet()
+ }
+
+ if i.NameTemplate != "" {
+ name, err := TemplateName(i.NameTemplate)
+ return name, args[0], err
+ }
+
+ if i.ReleaseName != "" {
+ return i.ReleaseName, args[0], nil
+ }
+
+ if !i.GenerateName {
+ return "", args[0], errors.New("must either provide a name or specify --generate-name")
+ }
+
+ base := filepath.Base(args[0])
+ if base == "." || base == "" {
+ base = "chart"
+ }
+ // if present, strip out the file extension from the name
+ if idx := strings.Index(base, "."); idx != -1 {
+ base = base[0:idx]
+ }
+
+ return fmt.Sprintf("%s-%d", base, time.Now().Unix()), args[0], nil
+}
+
+// TemplateName renders a name template, returning the name or an error.
+func TemplateName(nameTemplate string) (string, error) {
+ if nameTemplate == "" {
+ return "", nil
+ }
+
+ t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
+ if err != nil {
+ return "", err
+ }
+ var b bytes.Buffer
+ if err := t.Execute(&b, nil); err != nil {
+ return "", err
+ }
+
+ return b.String(), nil
+}
+
+// CheckDependencies checks the dependencies for a chart.
+func CheckDependencies(ch *chart.Chart, reqs []*chart.Dependency) error {
+ var missing []string
+
+OUTER:
+ for _, r := range reqs {
+ for _, d := range ch.Dependencies() {
+ if d.Name() == r.Name {
+ continue OUTER
+ }
+ }
+ missing = append(missing, r.Name)
+ }
+
+ if len(missing) > 0 {
+ return errors.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+ }
+ return nil
+}
+
+// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
+//
+// This does not ensure that the chart is well-formed; only that the requested filename exists.
+//
+// Order of resolution:
+// - relative to current working directory
+// - if path is absolute or begins with '.', error out here
+// - URL
+//
+// If 'verify' was set on ChartPathOptions, this will attempt to also verify the chart.
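+//
+// A minimal usage sketch (illustrative, not upstream documentation); it
+// assumes settings obtained from cli.New() and a hypothetical chart name:
+//
+//	opts := ChartPathOptions{Version: "1.2.3"}
+//	chartPath, err := opts.LocateChart("example/mychart", cli.New())
+//	if err != nil {
+//		// not found locally and could not be downloaded
+//	}
+//	// chartPath is an absolute path to a chart directory or .tgz archive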
+func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) { + if registry.IsOCI(name) && c.registryClient == nil { + return "", fmt.Errorf("unable to lookup chart %q, missing registry client", name) + } + + name = strings.TrimSpace(name) + version := strings.TrimSpace(c.Version) + + if _, err := os.Stat(name); err == nil { + abs, err := filepath.Abs(name) + if err != nil { + return abs, err + } + if c.Verify { + if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil { + return "", err + } + } + return abs, nil + } + if filepath.IsAbs(name) || strings.HasPrefix(name, ".") { + return name, errors.Errorf("path %q not found", name) + } + + dl := downloader.ChartDownloader{ + Out: os.Stdout, + Keyring: c.Keyring, + Getters: getter.All(settings), + Options: []getter.Option{ + getter.WithPassCredentialsAll(c.PassCredentialsAll), + getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), + getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), + getter.WithPlainHTTP(c.PlainHTTP), + getter.WithBasicAuth(c.Username, c.Password), + }, + RepositoryConfig: settings.RepositoryConfig, + RepositoryCache: settings.RepositoryCache, + RegistryClient: c.registryClient, + } + + if registry.IsOCI(name) { + dl.Options = append(dl.Options, getter.WithRegistryClient(c.registryClient)) + } + + if c.Verify { + dl.Verify = downloader.VerifyAlways + } + if c.RepoURL != "" { + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version, + c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings)) + if err != nil { + return "", err + } + name = chartURL + + // Only pass the user/pass on when the user has said to or when the + // location of the chart repo and the chart are the same domain. + u1, err := url.Parse(c.RepoURL) + if err != nil { + return "", err + } + u2, err := url.Parse(chartURL) + if err != nil { + return "", err + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth("", "")) + } + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } + + if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil { + return "", err + } + + filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache) + if err != nil { + return "", err + } + + lname, err := filepath.Abs(filename) + if err != nil { + return filename, err + } + return lname, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go new file mode 100644 index 000000000000..9037782bb5ca --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go @@ -0,0 +1,197 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "sync"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ "k8s.io/client-go/kubernetes"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// lazyClient is a workaround to deal with Kubernetes having an unstable client API.
+// In Kubernetes v1.18 the defaults were removed, which broke creating a
+// client without an explicit configuration. ಠ_ಠ
+type lazyClient struct {
+ // client caches an initialized kubernetes client
+ initClient sync.Once
+ client kubernetes.Interface
+ clientErr error
+
+ // clientFn loads a kubernetes client
+ clientFn func() (*kubernetes.Clientset, error)
+
+ // namespace passed to each client request
+ namespace string
+}
+
+func (s *lazyClient) init() error {
+ s.initClient.Do(func() {
+ s.client, s.clientErr = s.clientFn()
+ })
+ return s.clientErr
+}
+
+// secretClient implements a corev1.SecretInterface
+type secretClient struct{ *lazyClient }
+
+var _ corev1.SecretInterface = (*secretClient)(nil)
+
+func newSecretClient(lc *lazyClient) *secretClient {
+ return &secretClient{lazyClient: lc}
+}
+
+func (s *secretClient) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Create(ctx, secret, opts)
+}
+
+func (s *secretClient) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Update(ctx, secret, opts)
+}
+
+func (s *secretClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Delete(ctx, name, opts)
+}
+
+func (s *secretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts)
+}
+
+func (s *secretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Get(ctx, name, opts)
+}
+
+func (s *secretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).List(ctx, opts)
+}
+
+func (s *secretClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Watch(ctx, opts)
+}
+
+func (s *secretClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Patch(ctx, name, pt, data, opts, subresources...)
+} + +func (s *secretClient) Apply(ctx context.Context, secretConfiguration *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (*v1.Secret, error) { + if err := s.init(); err != nil { + return nil, err + } + return s.client.CoreV1().Secrets(s.namespace).Apply(ctx, secretConfiguration, opts) +} + +// configMapClient implements a corev1.ConfigMapInterface +type configMapClient struct{ *lazyClient } + +var _ corev1.ConfigMapInterface = (*configMapClient)(nil) + +func newConfigMapClient(lc *lazyClient) *configMapClient { + return &configMapClient{lazyClient: lc} +} + +func (c *configMapClient) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Create(ctx, configMap, opts) +} + +func (c *configMapClient) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Update(ctx, configMap, opts) +} + +func (c *configMapClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, opts) +} + +func (c *configMapClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (c *configMapClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, opts) +} + +func (c *configMapClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).List(ctx, opts) +} + +func (c *configMapClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Watch(ctx, opts) +} + +func (c *configMapClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Patch(ctx, name, pt, data, opts, subresources...) +} + +func (c *configMapClient) Apply(ctx context.Context, configMap *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Apply(ctx, configMap, opts) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lint.go b/vendor/helm.sh/helm/v3/pkg/action/lint.go new file mode 100644 index 000000000000..63a1bf354e96 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lint.go @@ -0,0 +1,130 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/lint"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
+// Lint is the action for checking that the semantics of a chart are well-formed.
+//
+// It provides the implementation of 'helm lint'.
+type Lint struct {
+ Strict bool
+ Namespace string
+ WithSubcharts bool
+ Quiet bool
+ SkipSchemaValidation bool
+ KubeVersion *chartutil.KubeVersion
+}
+
+// LintResult is the result of Lint
+type LintResult struct {
+ TotalChartsLinted int
+ Messages []support.Message
+ Errors []error
+}
+
+// NewLint creates a new Lint object with the given configuration.
+func NewLint() *Lint {
+ return &Lint{}
+}
+
+// Run executes 'helm lint' against the given chart.
+func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult {
+ lowestTolerance := support.ErrorSev
+ if l.Strict {
+ lowestTolerance = support.WarningSev
+ }
+ result := &LintResult{}
+ for _, path := range paths {
+ linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion, l.SkipSchemaValidation)
+ if err != nil {
+ result.Errors = append(result.Errors, err)
+ continue
+ }
+
+ result.Messages = append(result.Messages, linter.Messages...)
+ result.TotalChartsLinted++
+ for _, msg := range linter.Messages {
+ if msg.Severity >= lowestTolerance {
+ result.Errors = append(result.Errors, msg.Err)
+ }
+ }
+ }
+ return result
+}
+
+// HasWarningsOrErrors checks if the LintResult has any warnings or errors
+func HasWarningsOrErrors(result *LintResult) bool {
+ for _, msg := range result.Messages {
+ if msg.Severity > support.InfoSev {
+ return true
+ }
+ }
+ return len(result.Errors) > 0
+}
+
+func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) (support.Linter, error) {
+ var chartPath string
+ linter := support.Linter{}
+
+ if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
+ tempDir, err := os.MkdirTemp("", "helm-lint")
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to create temp dir to extract tarball")
+ }
+ defer os.RemoveAll(tempDir)
+
+ file, err := os.Open(path)
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to open tarball")
+ }
+ defer file.Close()
+
+ if err = chartutil.Expand(tempDir, file); err != nil {
+ return linter, errors.Wrap(err, "unable to extract tarball")
+ }
+
+ files, err := os.ReadDir(tempDir)
+ if err != nil {
+ return linter, errors.Wrapf(err, "unable to read temporary output directory %s", tempDir)
+ }
+ if !files[0].IsDir() {
+ return linter, errors.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
+ }
+
+ chartPath = filepath.Join(tempDir, files[0].Name())
+ } else {
+ chartPath = path
+ }
+
+ // Guard: Error out if this is not a chart.
+ if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil { + return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart") + } + + return lint.AllWithKubeVersionAndSchemaValidation(chartPath, vals, namespace, kubeVersion, skipSchemaValidation), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/list.go b/vendor/helm.sh/helm/v3/pkg/action/list.go new file mode 100644 index 000000000000..af0725c4a9b8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/list.go @@ -0,0 +1,324 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "path" + "regexp" + + "k8s.io/apimachinery/pkg/labels" + + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" +) + +// ListStates represents zero or more status codes that a list item may have set +// +// Because this is used as a bitmask filter, more than one bit can be flipped +// in the ListStates. +type ListStates uint + +const ( + // ListDeployed filters on status "deployed" + ListDeployed ListStates = 1 << iota + // ListUninstalled filters on status "uninstalled" + ListUninstalled + // ListUninstalling filters on status "uninstalling" (uninstall in progress) + ListUninstalling + // ListPendingInstall filters on status "pending" (deployment in progress) + ListPendingInstall + // ListPendingUpgrade filters on status "pending_upgrade" (upgrade in progress) + ListPendingUpgrade + // ListPendingRollback filters on status "pending_rollback" (rollback in progress) + ListPendingRollback + // ListSuperseded filters on status "superseded" (historical release version that is no longer deployed) + ListSuperseded + // ListFailed filters on status "failed" (release version not deployed because of error) + ListFailed + // ListUnknown filters on an unknown status + ListUnknown +) + +// FromName takes a state name and returns a ListStates representation. +// +// Currently, there are only names for individual flipped bits, so the returned +// ListStates will only match one of the constants. However, it is possible that +// this behavior could change in the future. 
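+//
+// Illustrative sketch (the state names mirror the switch cases below);
+// because states are single bits, several can be OR'd into one filter mask:
+//
+//	var mask ListStates
+//	mask |= mask.FromName("deployed")        // ListDeployed
+//	mask |= mask.FromName("pending-install") // ListPendingInstall
+//	mask |= mask.FromName("no-such-state")   // ListUnknown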
+func (s ListStates) FromName(str string) ListStates { + switch str { + case "deployed": + return ListDeployed + case "uninstalled": + return ListUninstalled + case "superseded": + return ListSuperseded + case "failed": + return ListFailed + case "uninstalling": + return ListUninstalling + case "pending-install": + return ListPendingInstall + case "pending-upgrade": + return ListPendingUpgrade + case "pending-rollback": + return ListPendingRollback + } + return ListUnknown +} + +// ListAll is a convenience for enabling all list filters +const ListAll = ListDeployed | ListUninstalled | ListUninstalling | ListPendingInstall | ListPendingRollback | ListPendingUpgrade | ListSuperseded | ListFailed + +// Sorter is a top-level sort +type Sorter uint + +const ( + // ByNameDesc sorts by descending lexicographic order + ByNameDesc Sorter = iota + 1 + // ByDateAsc sorts by ascending dates (oldest updated release first) + ByDateAsc + // ByDateDesc sorts by descending dates (latest updated release first) + ByDateDesc +) + +// List is the action for listing releases. +// +// It provides, for example, the implementation of 'helm list'. +// It returns no more than one revision of every release in one specific, or in +// all, namespaces. +// To list all the revisions of a specific release, see the History action. +type List struct { + cfg *Configuration + + // All ignores the limit/offset + All bool + // AllNamespaces searches across namespaces + AllNamespaces bool + // Sort indicates the sort to use + // + // see pkg/releaseutil for several useful sorters + Sort Sorter + // Overrides the default lexicographic sorting + ByDate bool + SortReverse bool + // StateMask accepts a bitmask of states for items to show. + // The default is ListDeployed + StateMask ListStates + // Limit is the number of items to return per Run() + Limit int + // Offset is the starting index for the Run() call + Offset int + // Filter is a filter that is applied to the results + Filter string + Short bool + NoHeaders bool + TimeFormat string + Uninstalled bool + Superseded bool + Uninstalling bool + Deployed bool + Failed bool + Pending bool + Selector string +} + +// NewList constructs a new *List +func NewList(cfg *Configuration) *List { + return &List{ + StateMask: ListDeployed | ListFailed, + cfg: cfg, + } +} + +// Run executes the list command, returning a set of matches. +func (l *List) Run() ([]*release.Release, error) { + if err := l.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + var filter *regexp.Regexp + if l.Filter != "" { + var err error + filter, err = regexp.Compile(l.Filter) + if err != nil { + return nil, err + } + } + + results, err := l.cfg.Releases.List(func(rel *release.Release) bool { + // Skip anything that doesn't match the filter. + if filter != nil && !filter.MatchString(rel.Name) { + return false + } + + return true + }) + + if err != nil { + return nil, err + } + + if results == nil { + return results, nil + } + + // by definition, superseded releases are never shown if + // only the latest releases are returned. 
+ // so if the requested statemask is _only_ ListSuperseded, skip the latest release filter
+ if l.StateMask != ListSuperseded {
+ results = filterLatestReleases(results)
+ }
+
+ // State mask application must occur after filtering to
+ // latest releases, otherwise outdated entries can be returned
+ results = l.filterStateMask(results)
+
+ // Skip anything that doesn't match the selector
+ selectorObj, err := labels.Parse(l.Selector)
+ if err != nil {
+ return nil, err
+ }
+ results = l.filterSelector(results, selectorObj)
+
+ // Unfortunately, we have to sort before truncating, which can incur substantial overhead
+ l.sort(results)
+
+ // Guard on offset
+ if l.Offset >= len(results) {
+ return []*release.Release{}, nil
+ }
+
+ // Calculate the limit and offset, and then truncate results if necessary.
+ limit := len(results)
+ if l.Limit > 0 && l.Limit < limit {
+ limit = l.Limit
+ }
+ last := l.Offset + limit
+ if l := len(results); l < last {
+ last = l
+ }
+ results = results[l.Offset:last]
+
+ return results, err
+}
+
+// sort is an in-place sort where order is based on the value of l.Sort
+func (l *List) sort(rels []*release.Release) {
+ if l.SortReverse {
+ l.Sort = ByNameDesc
+ }
+
+ if l.ByDate {
+ l.Sort = ByDateDesc
+ if l.SortReverse {
+ l.Sort = ByDateAsc
+ }
+ }
+
+ switch l.Sort {
+ case ByDateDesc:
+ releaseutil.SortByDate(rels)
+ case ByDateAsc:
+ releaseutil.Reverse(rels, releaseutil.SortByDate)
+ case ByNameDesc:
+ releaseutil.Reverse(rels, releaseutil.SortByName)
+ default:
+ releaseutil.SortByName(rels)
+ }
+}
+
+// filterLatestReleases returns a list scrubbed of old releases.
+func filterLatestReleases(releases []*release.Release) []*release.Release {
+ latestReleases := make(map[string]*release.Release)
+
+ for _, rls := range releases {
+ name, namespace := rls.Name, rls.Namespace
+ key := path.Join(namespace, name)
+ if latestRelease, exists := latestReleases[key]; exists && latestRelease.Version > rls.Version {
+ continue
+ }
+ latestReleases[key] = rls
+ }
+
+ var list = make([]*release.Release, 0, len(latestReleases))
+ for _, rls := range latestReleases {
+ list = append(list, rls)
+ }
+ return list
+}
+
+func (l *List) filterStateMask(releases []*release.Release) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ currentStatus := l.StateMask.FromName(rls.Info.Status.String())
+ mask := l.StateMask & currentStatus
+ if mask == 0 {
+ continue
+ }
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+
+ return desiredStateReleases
+}
+
+func (l *List) filterSelector(releases []*release.Release, selector labels.Selector) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ if selector.Matches(labels.Set(rls.Labels)) {
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+ }
+
+ return desiredStateReleases
+}
+
+// SetStateMask calculates the state mask based on parameters.
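+//
+// A hedged usage sketch (assumes an initialized *Configuration named cfg):
+//
+//	lst := NewList(cfg)
+//	lst.Failed = true
+//	lst.Pending = true
+//	lst.SetStateMask() // mask now covers failed and all pending states
+//	releases, err := lst.Run()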
+func (l *List) SetStateMask() { + if l.All { + l.StateMask = ListAll + return + } + + state := ListStates(0) + if l.Deployed { + state |= ListDeployed + } + if l.Uninstalled { + state |= ListUninstalled + } + if l.Uninstalling { + state |= ListUninstalling + } + if l.Pending { + state |= ListPendingInstall | ListPendingRollback | ListPendingUpgrade + } + if l.Failed { + state |= ListFailed + } + if l.Superseded { + state |= ListSuperseded + } + + // Apply a default + if state == 0 { + state = ListDeployed | ListFailed + } + + l.StateMask = state +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/package.go b/vendor/helm.sh/helm/v3/pkg/action/package.go new file mode 100644 index 000000000000..2357e3882a5c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/package.go @@ -0,0 +1,188 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bufio" + "fmt" + "os" + "syscall" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "golang.org/x/term" + + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/provenance" +) + +// Package is the action for packaging a chart. +// +// It provides the implementation of 'helm package'. +type Package struct { + Sign bool + Key string + Keyring string + PassphraseFile string + Version string + AppVersion string + Destination string + DependencyUpdate bool + + RepositoryConfig string + RepositoryCache string + PlainHTTP bool + Username string + Password string + CertFile string + KeyFile string + CaFile string + InsecureSkipTLSverify bool +} + +// NewPackage creates a new Package object with the given configuration. +func NewPackage() *Package { + return &Package{} +} + +// Run executes 'helm package' against the given chart and returns the path to the packaged chart. +func (p *Package) Run(path string, _ map[string]interface{}) (string, error) { + ch, err := loader.LoadDir(path) + if err != nil { + return "", err + } + + // If version is set, modify the version. + if p.Version != "" { + ch.Metadata.Version = p.Version + } + + if err := validateVersion(ch.Metadata.Version); err != nil { + return "", err + } + + if p.AppVersion != "" { + ch.Metadata.AppVersion = p.AppVersion + } + + if reqs := ch.Metadata.Dependencies; reqs != nil { + if err := CheckDependencies(ch, reqs); err != nil { + return "", err + } + } + + var dest string + if p.Destination == "." { + // Save to the current working directory. + dest, err = os.Getwd() + if err != nil { + return "", err + } + } else { + // Otherwise save to set destination + dest = p.Destination + } + + name, err := chartutil.Save(ch, dest) + if err != nil { + return "", errors.Wrap(err, "failed to save") + } + + if p.Sign { + err = p.Clearsign(name) + } + + return name, err +} + +// validateVersion Verify that version is a Version, and error out if it is not. 
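+// For example (behavior assumed from Masterminds/semver): "1.2.3" and
+// "1.2.3-rc.1" parse cleanly, while a string such as "latest" is rejected.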
+func validateVersion(ver string) error {
+ if _, err := semver.NewVersion(ver); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Clearsign signs a chart
+func (p *Package) Clearsign(filename string) error {
+ // Load keyring
+ signer, err := provenance.NewFromKeyring(p.Keyring, p.Key)
+ if err != nil {
+ return err
+ }
+
+ passphraseFetcher := promptUser
+ if p.PassphraseFile != "" {
+ passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := signer.DecryptKey(passphraseFetcher); err != nil {
+ return err
+ }
+
+ sig, err := signer.ClearSign(filename)
+ if err != nil {
+ return err
+ }
+
+ return os.WriteFile(filename+".prov", []byte(sig), 0644)
+}
+
+// promptUser implements provenance.PassphraseFetcher
+func promptUser(name string) ([]byte, error) {
+ fmt.Printf("Password for key %q > ", name)
+ // syscall.Stdin is not an int in all environments and needs to be coerced
+ // into one there (e.g., Windows)
+ pw, err := term.ReadPassword(int(syscall.Stdin))
+ fmt.Println()
+ return pw, err
+}
+
+func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+ file, err := openPassphraseFile(passphraseFile, stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ reader := bufio.NewReader(file)
+ passphrase, _, err := reader.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ return func(_ string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
+}
+
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+ if passphraseFile == "-" {
+ stat, err := stdin.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if (stat.Mode() & os.ModeNamedPipe) == 0 {
+ return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+ }
+ return stdin, nil
+ }
+ return os.Open(passphraseFile)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go
new file mode 100644
index 000000000000..787553125536
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go
@@ -0,0 +1,172 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/cli"
+ "helm.sh/helm/v3/pkg/downloader"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/registry"
+ "helm.sh/helm/v3/pkg/repo"
+)
+
+// Pull is the action for downloading a given chart to local storage.
+//
+// It provides the implementation of 'helm pull'.
+type Pull struct {
+ ChartPathOptions
+
+ Settings *cli.EnvSettings // TODO: refactor this out of pkg/action
+
+ Devel bool
+ Untar bool
+ VerifyLater bool
+ UntarDir string
+ DestDir string
+ cfg *Configuration
+}
+
+type PullOpt func(*Pull)
+
+func WithConfig(cfg *Configuration) PullOpt {
+ return func(p *Pull) {
+ p.cfg = cfg
+ }
+}
+
+// NewPull creates a new Pull object.
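+//
+// Sketch of the options pattern (illustrative; assumes an initialized
+// *Configuration cfg with a registry client, CLI settings from cli.New(),
+// and a hypothetical chart reference):
+//
+//	p := NewPullWithOpts(WithConfig(cfg))
+//	p.Settings = cli.New()
+//	p.DestDir = "."
+//	out, err := p.Run("example/mychart")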
+func NewPull() *Pull { + return NewPullWithOpts() +} + +// NewPullWithOpts creates a new pull, with configuration options. +func NewPullWithOpts(opts ...PullOpt) *Pull { + p := &Pull{} + for _, fn := range opts { + fn(p) + } + + return p +} + +// SetRegistryClient sets the registry client on the pull configuration object. +func (p *Pull) SetRegistryClient(client *registry.Client) { + p.cfg.RegistryClient = client +} + +// Run executes 'helm pull' against the given release. +func (p *Pull) Run(chartRef string) (string, error) { + var out strings.Builder + + c := downloader.ChartDownloader{ + Out: &out, + Keyring: p.Keyring, + Verify: downloader.VerifyNever, + Getters: getter.All(p.Settings), + Options: []getter.Option{ + getter.WithBasicAuth(p.Username, p.Password), + getter.WithPassCredentialsAll(p.PassCredentialsAll), + getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile), + getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify), + getter.WithPlainHTTP(p.PlainHTTP), + }, + RegistryClient: p.cfg.RegistryClient, + RepositoryConfig: p.Settings.RepositoryConfig, + RepositoryCache: p.Settings.RepositoryCache, + } + + if registry.IsOCI(chartRef) { + c.Options = append(c.Options, + getter.WithRegistryClient(p.cfg.RegistryClient)) + c.RegistryClient = p.cfg.RegistryClient + } + + if p.Verify { + c.Verify = downloader.VerifyAlways + } else if p.VerifyLater { + c.Verify = downloader.VerifyLater + } + + // If untar is set, we fetch to a tempdir, then untar and copy after + // verification. + dest := p.DestDir + if p.Untar { + var err error + dest, err = os.MkdirTemp("", "helm-") + if err != nil { + return out.String(), errors.Wrap(err, "failed to untar") + } + defer os.RemoveAll(dest) + } + + if p.RepoURL != "" { + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings)) + if err != nil { + return out.String(), err + } + chartRef = chartURL + } + + saved, v, err := c.DownloadTo(chartRef, p.Version, dest) + if err != nil { + return out.String(), err + } + + if p.Verify { + for name := range v.SignedBy.Identities { + fmt.Fprintf(&out, "Signed by: %v\n", name) + } + fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", v.SignedBy.PrimaryKey.Fingerprint) + fmt.Fprintf(&out, "Chart Hash Verified: %s\n", v.FileHash) + } + + // After verification, untar the chart into the requested directory. + if p.Untar { + ud := p.UntarDir + if !filepath.IsAbs(ud) { + ud = filepath.Join(p.DestDir, ud) + } + // Let udCheck to check conflict file/dir without replacing ud when untarDir is the current directory(.). + udCheck := ud + if udCheck == "." { + _, udCheck = filepath.Split(chartRef) + } else { + _, chartName := filepath.Split(chartRef) + udCheck = filepath.Join(udCheck, chartName) + } + + if _, err := os.Stat(udCheck); err != nil { + if err := os.MkdirAll(udCheck, 0755); err != nil { + return out.String(), errors.Wrap(err, "failed to untar (mkdir)") + } + + } else { + return out.String(), errors.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck) + } + + return out.String(), chartutil.ExpandFile(ud, saved) + } + return out.String(), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/push.go b/vendor/helm.sh/helm/v3/pkg/action/push.go new file mode 100644 index 000000000000..70e5c158238a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/push.go @@ -0,0 +1,112 @@ +/* +Copyright The Helm Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" + "strings" + + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/pusher" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/uploader" +) + +// Push is the action for uploading a chart. +// +// It provides the implementation of 'helm push'. +type Push struct { + Settings *cli.EnvSettings + cfg *Configuration + certFile string + keyFile string + caFile string + insecureSkipTLSverify bool + plainHTTP bool + out io.Writer +} + +// PushOpt is a type of function that sets options for a push action. +type PushOpt func(*Push) + +// WithPushConfig sets the cfg field on the push configuration object. +func WithPushConfig(cfg *Configuration) PushOpt { + return func(p *Push) { + p.cfg = cfg + } +} + +// WithTLSClientConfig sets the certFile, keyFile, and caFile fields on the push configuration object. +func WithTLSClientConfig(certFile, keyFile, caFile string) PushOpt { + return func(p *Push) { + p.certFile = certFile + p.keyFile = keyFile + p.caFile = caFile + } +} + +// WithInsecureSkipTLSVerify determines if a TLS Certificate will be checked +func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) PushOpt { + return func(p *Push) { + p.insecureSkipTLSverify = insecureSkipTLSVerify + } +} + +// WithPlainHTTP configures the use of plain HTTP connections. +func WithPlainHTTP(plainHTTP bool) PushOpt { + return func(p *Push) { + p.plainHTTP = plainHTTP + } +} + +// WithPushOptWriter sets the registryOut field on the push configuration object. +func WithPushOptWriter(out io.Writer) PushOpt { + return func(p *Push) { + p.out = out + } +} + +// NewPushWithOpts creates a new push, with configuration options. +func NewPushWithOpts(opts ...PushOpt) *Push { + p := &Push{} + for _, fn := range opts { + fn(p) + } + return p +} + +// Run executes 'helm push' against the given chart archive. +func (p *Push) Run(chartRef string, remote string) (string, error) { + var out strings.Builder + + c := uploader.ChartUploader{ + Out: &out, + Pushers: pusher.All(p.Settings), + Options: []pusher.Option{ + pusher.WithTLSClientConfig(p.certFile, p.keyFile, p.caFile), + pusher.WithInsecureSkipTLSVerify(p.insecureSkipTLSverify), + pusher.WithPlainHTTP(p.plainHTTP), + }, + } + + if registry.IsOCI(remote) { + // Don't use the default registry client if tls options are set. + c.Options = append(c.Options, pusher.WithRegistryClient(p.cfg.RegistryClient)) + } + + return out.String(), c.UploadTo(chartRef, remote) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_login.go b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go new file mode 100644 index 000000000000..b4e038123a42 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go @@ -0,0 +1,99 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+
+ "helm.sh/helm/v3/pkg/registry"
+)
+
+// RegistryLogin performs a registry login operation.
+type RegistryLogin struct {
+ cfg *Configuration
+ certFile string
+ keyFile string
+ caFile string
+ insecure bool
+ plainHTTP bool
+}
+
+type RegistryLoginOpt func(*RegistryLogin) error
+
+// WithCertFile specifies the path to the certificate file to use for TLS.
+func WithCertFile(certFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.certFile = certFile
+ return nil
+ }
+}
+
+// WithInsecure specifies whether to skip certificate verification.
+func WithInsecure(insecure bool) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.insecure = insecure
+ return nil
+ }
+}
+
+// WithKeyFile specifies the path to the key file to use for TLS.
+func WithKeyFile(keyFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.keyFile = keyFile
+ return nil
+ }
+}
+
+// WithCAFile specifies the path to the CA file to use for TLS.
+func WithCAFile(caFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.caFile = caFile
+ return nil
+ }
+}
+
+// WithPlainHTTPLogin uses http rather than https for login.
+func WithPlainHTTPLogin(isPlain bool) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.plainHTTP = isPlain
+ return nil
+ }
+}
+
+// NewRegistryLogin creates a new RegistryLogin object with the given configuration.
+func NewRegistryLogin(cfg *Configuration) *RegistryLogin {
+ return &RegistryLogin{
+ cfg: cfg,
+ }
+}
+
+// Run executes the registry login operation
+func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error {
+ for _, opt := range opts {
+ if err := opt(a); err != nil {
+ return err
+ }
+ }
+
+ return a.cfg.RegistryClient.Login(
+ hostname,
+ registry.LoginOptBasicAuth(username, password),
+ registry.LoginOptInsecure(a.insecure),
+ registry.LoginOptTLSClientConfig(a.certFile, a.keyFile, a.caFile),
+ registry.LoginOptPlainText(a.plainHTTP),
+ )
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
new file mode 100644
index 000000000000..7ce92defcfcb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+)
+
+// RegistryLogout performs a registry logout operation.
+type RegistryLogout struct {
+ cfg *Configuration
+}
+
+// NewRegistryLogout creates a new RegistryLogout object with the given configuration.
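+//
+// Typical pairing with RegistryLogin (illustrative; the hostname is a
+// placeholder):
+//
+//	logout := NewRegistryLogout(cfg)
+//	err := logout.Run(io.Discard, "registry.example.com")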
+func NewRegistryLogout(cfg *Configuration) *RegistryLogout { + return &RegistryLogout{ + cfg: cfg, + } +} + +// Run executes the registry logout operation +func (a *RegistryLogout) Run(_ io.Writer, hostname string) error { + return a.cfg.RegistryClient.Logout(hostname) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/release_testing.go b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go new file mode 100644 index 000000000000..aaffe47cae1e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go @@ -0,0 +1,153 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "context" + "fmt" + "io" + "sort" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +const ( + ExcludeNameFilter = "!name" + IncludeNameFilter = "name" +) + +// ReleaseTesting is the action for testing a release. +// +// It provides the implementation of 'helm test'. +type ReleaseTesting struct { + cfg *Configuration + Timeout time.Duration + // Used for fetching logs from test pods + Namespace string + Filters map[string][]string + HideNotes bool +} + +// NewReleaseTesting creates a new ReleaseTesting object with the given configuration. +func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { + return &ReleaseTesting{ + cfg: cfg, + Filters: map[string][]string{}, + } +} + +// Run executes 'helm test' against the given release. +func (r *ReleaseTesting) Run(name string) (*release.Release, error) { + if err := r.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("releaseTest: Release name is invalid: %s", name) + } + + // finds the non-deleted release with the given name + rel, err := r.cfg.Releases.Last(name) + if err != nil { + return rel, err + } + + skippedHooks := []*release.Hook{} + executingHooks := []*release.Hook{} + if len(r.Filters[ExcludeNameFilter]) != 0 { + for _, h := range rel.Hooks { + if contains(r.Filters[ExcludeNameFilter], h.Name) { + skippedHooks = append(skippedHooks, h) + } else { + executingHooks = append(executingHooks, h) + } + } + rel.Hooks = executingHooks + } + if len(r.Filters[IncludeNameFilter]) != 0 { + executingHooks = nil + for _, h := range rel.Hooks { + if contains(r.Filters[IncludeNameFilter], h.Name) { + executingHooks = append(executingHooks, h) + } else { + skippedHooks = append(skippedHooks, h) + } + } + rel.Hooks = executingHooks + } + + if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil { + rel.Hooks = append(skippedHooks, rel.Hooks...) + r.cfg.Releases.Update(rel) + return rel, err + } + + rel.Hooks = append(skippedHooks, rel.Hooks...) + return rel, r.cfg.Releases.Update(rel) +} + +// GetPodLogs will write the logs for all test pods in the given release into +// the given writer. 
These can be immediately output to the user or captured for
+// other uses.
+func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
+	client, err := r.cfg.KubernetesClientSet()
+	if err != nil {
+		return errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+	}
+
+	hooksByWeight := append([]*release.Hook{}, rel.Hooks...)
+	sort.Stable(hookByWeight(hooksByWeight))
+	for _, h := range hooksByWeight {
+		for _, e := range h.Events {
+			if e == release.HookTest {
+				if contains(r.Filters[ExcludeNameFilter], h.Name) {
+					continue
+				}
+				if len(r.Filters[IncludeNameFilter]) > 0 && !contains(r.Filters[IncludeNameFilter], h.Name) {
+					continue
+				}
+				req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
+				logReader, err := req.Stream(context.Background())
+				if err != nil {
+					return errors.Wrapf(err, "unable to get pod logs for %s", h.Name)
+				}
+
+				fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
+				_, err = io.Copy(out, logReader)
+				fmt.Fprintln(out)
+				if err != nil {
+					return errors.Wrapf(err, "unable to write pod logs for %s", h.Name)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// contains reports whether value is present in arr.
+func contains(arr []string, value string) bool {
+	for _, item := range arr {
+		if item == value {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
new file mode 100644
index 000000000000..63e83f3d940d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"strings"
+
+	"helm.sh/helm/v3/pkg/kube"
+	"helm.sh/helm/v3/pkg/releaseutil"
+)
+
+// filterManifestsToKeep partitions manifests by the resource-policy
+// annotation: manifests annotated with the keep policy are returned in keep,
+// manifests without the annotation in remaining.
+func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
+	for _, m := range manifests {
+		if m.Head.Metadata == nil || m.Head.Metadata.Annotations == nil || len(m.Head.Metadata.Annotations) == 0 {
+			remaining = append(remaining, m)
+			continue
+		}
+
+		resourcePolicyType, ok := m.Head.Metadata.Annotations[kube.ResourcePolicyAnno]
+		if !ok {
+			remaining = append(remaining, m)
+			continue
+		}
+
+		resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType))
+		if resourcePolicyType == kube.KeepPolicy {
+			keep = append(keep, m)
+		}
+	}
+	return keep, remaining
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/rollback.go b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
new file mode 100644
index 000000000000..b0be17d1305f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
@@ -0,0 +1,265 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" + helmtime "helm.sh/helm/v3/pkg/time" +) + +// Rollback is the action for rolling back to a given release. +// +// It provides the implementation of 'helm rollback'. +type Rollback struct { + cfg *Configuration + + Version int + Timeout time.Duration + Wait bool + WaitForJobs bool + DisableHooks bool + DryRun bool + Recreate bool // will (if true) recreate pods after a rollback. + Force bool // will (if true) force resource upgrade through uninstall/recreate if needed + CleanupOnFail bool + MaxHistory int // MaxHistory limits the maximum number of revisions saved per release +} + +// NewRollback creates a new Rollback object with the given configuration. +func NewRollback(cfg *Configuration) *Rollback { + return &Rollback{ + cfg: cfg, + } +} + +// Run executes 'helm rollback' against the given release. +func (r *Rollback) Run(name string) error { + if err := r.cfg.KubeClient.IsReachable(); err != nil { + return err + } + + r.cfg.Releases.MaxHistory = r.MaxHistory + + r.cfg.Log("preparing rollback of %s", name) + currentRelease, targetRelease, err := r.prepareRollback(name) + if err != nil { + return err + } + + if !r.DryRun { + r.cfg.Log("creating rolled back release for %s", name) + if err := r.cfg.Releases.Create(targetRelease); err != nil { + return err + } + } + + r.cfg.Log("performing rollback of %s", name) + if _, err := r.performRollback(currentRelease, targetRelease); err != nil { + return err + } + + if !r.DryRun { + r.cfg.Log("updating status for rolled back release for %s", name) + if err := r.cfg.Releases.Update(targetRelease); err != nil { + return err + } + } + return nil +} + +// prepareRollback finds the previous release and prepares a new release object with +// the previous release's configuration +func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, nil, errors.Errorf("prepareRollback: Release name is invalid: %s", name) + } + + if r.Version < 0 { + return nil, nil, errInvalidRevision + } + + currentRelease, err := r.cfg.Releases.Last(name) + if err != nil { + return nil, nil, err + } + + previousVersion := r.Version + if r.Version == 0 { + previousVersion = currentRelease.Version - 1 + } + + historyReleases, err := r.cfg.Releases.History(name) + if err != nil { + return nil, nil, err + } + + // Check if the history version to be rolled back exists + previousVersionExist := false + for _, historyRelease := range historyReleases { + version := historyRelease.Version + if previousVersion == version { + previousVersionExist = true + break + } + } + if !previousVersionExist { + return nil, nil, errors.Errorf("release has no %d version", previousVersion) + } + + r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion) + + previousRelease, err := r.cfg.Releases.Get(name, previousVersion) + if err != nil { + return nil, nil, err + } + + // 
Store a new release object with previous release's configuration + targetRelease := &release.Release{ + Name: name, + Namespace: currentRelease.Namespace, + Chart: previousRelease.Chart, + Config: previousRelease.Config, + Info: &release.Info{ + FirstDeployed: currentRelease.Info.FirstDeployed, + LastDeployed: helmtime.Now(), + Status: release.StatusPendingRollback, + Notes: previousRelease.Info.Notes, + // Because we lose the reference to previous version elsewhere, we set the + // message here, and only override it later if we experience failure. + Description: fmt.Sprintf("Rollback to %d", previousVersion), + }, + Version: currentRelease.Version + 1, + Labels: previousRelease.Labels, + Manifest: previousRelease.Manifest, + Hooks: previousRelease.Hooks, + } + + return currentRelease, targetRelease, nil +} + +func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { + if r.DryRun { + r.cfg.Log("dry run for %s", targetRelease.Name) + return targetRelease, nil + } + + current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest") + } + target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest") + } + + // pre-rollback hooks + if !r.DisableHooks { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil { + return targetRelease, err + } + } else { + r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name) + } + + // It is safe to use "force" here because these are resources currently rendered by the chart. + err = target.Visit(setMetadataVisitor(targetRelease.Name, targetRelease.Namespace, true)) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to set metadata visitor from target release") + } + results, err := r.cfg.KubeClient.Update(current, target, r.Force) + + if err != nil { + msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) + r.cfg.Log("warning: %s", msg) + currentRelease.Info.Status = release.StatusSuperseded + targetRelease.Info.Status = release.StatusFailed + targetRelease.Info.Description = msg + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + if r.CleanupOnFail { + r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created)) + _, errs := r.cfg.KubeClient.Delete(results.Created) + if errs != nil { + var errorList []string + for _, e := range errs { + errorList = append(errorList, e.Error()) + } + return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) + } + r.cfg.Log("Resource cleanup complete") + } + return targetRelease, err + } + + if r.Recreate { + // NOTE: Because this is not critical for a release to succeed, we just + // log if an error occurs and continue onward. 
If we ever introduce log + // levels, we should make these error level logs so users are notified + // that they'll need to go do the cleanup on their own + if err := recreate(r.cfg, results.Updated); err != nil { + r.cfg.Log(err.Error()) + } + } + + if r.Wait { + if r.WaitForJobs { + if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } else { + if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } + } + + // post-rollback hooks + if !r.DisableHooks { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil { + return targetRelease, err + } + } + + deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name) + if err != nil && !strings.Contains(err.Error(), "has no deployed releases") { + return nil, err + } + // Supersede all previous deployments, see issue #2941. + for _, rel := range deployed { + r.cfg.Log("superseding previous deployment %d", rel.Version) + rel.Info.Status = release.StatusSuperseded + r.cfg.recordRelease(rel) + } + + targetRelease.Info.Status = release.StatusDeployed + + return targetRelease, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/show.go b/vendor/helm.sh/helm/v3/pkg/action/show.go new file mode 100644 index 000000000000..6ed855b83fd5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/show.go @@ -0,0 +1,165 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pkg/errors" + "k8s.io/cli-runtime/pkg/printers" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/registry" +) + +// ShowOutputFormat is the format of the output of `helm show` +type ShowOutputFormat string + +const ( + // ShowAll is the format which shows all the information of a chart + ShowAll ShowOutputFormat = "all" + // ShowChart is the format which only shows the chart's definition + ShowChart ShowOutputFormat = "chart" + // ShowValues is the format which only shows the chart's values + ShowValues ShowOutputFormat = "values" + // ShowReadme is the format which only shows the chart's README + ShowReadme ShowOutputFormat = "readme" + // ShowCRDs is the format which only shows the chart's CRDs + ShowCRDs ShowOutputFormat = "crds" +) + +var readmeFileNames = []string{"readme.md", "readme.txt", "readme"} + +func (o ShowOutputFormat) String() string { + return string(o) +} + +// Show is the action for checking a given release's information. +// +// It provides the implementation of 'helm show' and its respective subcommands. +type Show struct { + ChartPathOptions + Devel bool + OutputFormat ShowOutputFormat + JSONPathTemplate string + chart *chart.Chart // for testing +} + +// NewShow creates a new Show object with the given configuration. +// Deprecated: Use NewShowWithConfig +// TODO Helm 4: Fold NewShowWithConfig back into NewShow +func NewShow(output ShowOutputFormat) *Show { + return &Show{ + OutputFormat: output, + } +} + +// NewShowWithConfig creates a new Show object with the given configuration. +func NewShowWithConfig(output ShowOutputFormat, cfg *Configuration) *Show { + sh := &Show{ + OutputFormat: output, + } + sh.ChartPathOptions.registryClient = cfg.RegistryClient + + return sh +} + +// SetRegistryClient sets the registry client to use when pulling a chart from a registry. +func (s *Show) SetRegistryClient(client *registry.Client) { + s.ChartPathOptions.registryClient = client +} + +// Run executes 'helm show' against the given release. 
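+// A minimal usage sketch (the chart path is illustrative, and cfg is assumed
+// to be an initialized *Configuration):
+//
+//	show := NewShowWithConfig(ShowValues, cfg)
+//	out, err := show.Run("mychart-0.1.0.tgz")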
+func (s *Show) Run(chartpath string) (string, error) { + if s.chart == nil { + chrt, err := loader.Load(chartpath) + if err != nil { + return "", err + } + s.chart = chrt + } + cf, err := yaml.Marshal(s.chart.Metadata) + if err != nil { + return "", err + } + + var out strings.Builder + if s.OutputFormat == ShowChart || s.OutputFormat == ShowAll { + fmt.Fprintf(&out, "%s\n", cf) + } + + if (s.OutputFormat == ShowValues || s.OutputFormat == ShowAll) && s.chart.Values != nil { + if s.OutputFormat == ShowAll { + fmt.Fprintln(&out, "---") + } + if s.JSONPathTemplate != "" { + printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate) + if err != nil { + return "", errors.Wrapf(err, "error parsing jsonpath %s", s.JSONPathTemplate) + } + printer.Execute(&out, s.chart.Values) + } else { + for _, f := range s.chart.Raw { + if f.Name == chartutil.ValuesfileName { + fmt.Fprintln(&out, string(f.Data)) + } + } + } + } + + if s.OutputFormat == ShowReadme || s.OutputFormat == ShowAll { + readme := findReadme(s.chart.Files) + if readme != nil { + if s.OutputFormat == ShowAll { + fmt.Fprintln(&out, "---") + } + fmt.Fprintf(&out, "%s\n", readme.Data) + } + } + + if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll { + crds := s.chart.CRDObjects() + if len(crds) > 0 { + if s.OutputFormat == ShowAll && !bytes.HasPrefix(crds[0].File.Data, []byte("---")) { + fmt.Fprintln(&out, "---") + } + for _, crd := range crds { + fmt.Fprintf(&out, "%s\n", string(crd.File.Data)) + } + } + } + return out.String(), nil +} + +func findReadme(files []*chart.File) (file *chart.File) { + for _, file := range files { + for _, n := range readmeFileNames { + if file == nil { + continue + } + if strings.EqualFold(file.Name, n) { + return file + } + } + } + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/status.go b/vendor/helm.sh/helm/v3/pkg/action/status.go new file mode 100644 index 000000000000..ee1c9d613863 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/status.go @@ -0,0 +1,95 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "errors" + + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" +) + +// Status is the action for checking the deployment status of releases. +// +// It provides the implementation of 'helm status'. +type Status struct { + cfg *Configuration + + Version int + + // If true, display description to output format, + // only affect print type table. + // TODO Helm 4: Remove this flag and output the description by default. + ShowDescription bool + + // ShowResources sets if the resources should be retrieved with the status. + // TODO Helm 4: Remove this flag and output the resources by default. + ShowResources bool + + // ShowResourcesTable is used with ShowResources. When true this will cause + // the resulting objects to be retrieved as a kind=table. + ShowResourcesTable bool +} + +// NewStatus creates a new Status object with the given configuration. 
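+// A minimal usage sketch (the release name is illustrative, and cfg is
+// assumed to be an initialized *Configuration):
+//
+//	status := NewStatus(cfg)
+//	status.Version = 0 // 0 means the latest revision
+//	rel, err := status.Run("my-release")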
+func NewStatus(cfg *Configuration) *Status { + return &Status{ + cfg: cfg, + } +} + +// Run executes 'helm status' against the given release. +func (s *Status) Run(name string) (*release.Release, error) { + if err := s.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if !s.ShowResources { + return s.cfg.releaseContent(name, s.Version) + } + + rel, err := s.cfg.releaseContent(name, s.Version) + if err != nil { + return nil, err + } + + if kubeClient, ok := s.cfg.KubeClient.(kube.InterfaceResources); ok { + var resources kube.ResourceList + if s.ShowResourcesTable { + resources, err = kubeClient.BuildTable(bytes.NewBufferString(rel.Manifest), false) + if err != nil { + return nil, err + } + } else { + resources, err = s.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), false) + if err != nil { + return nil, err + } + } + + resp, err := kubeClient.Get(resources, true) + if err != nil { + return nil, err + } + + rel.Info.Resources = resp + + return rel, nil + } + return nil, errors.New("unable to get kubeClient with interface InterfaceResources") +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go new file mode 100644 index 000000000000..ac0c4fee8cc4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go @@ -0,0 +1,247 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "strings" + "time" + + "github.com/pkg/errors" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + helmtime "helm.sh/helm/v3/pkg/time" +) + +// Uninstall is the action for uninstalling releases. +// +// It provides the implementation of 'helm uninstall'. +type Uninstall struct { + cfg *Configuration + + DisableHooks bool + DryRun bool + IgnoreNotFound bool + KeepHistory bool + Wait bool + DeletionPropagation string + Timeout time.Duration + Description string +} + +// NewUninstall creates a new Uninstall object with the given configuration. +func NewUninstall(cfg *Configuration) *Uninstall { + return &Uninstall{ + cfg: cfg, + } +} + +// Run uninstalls the given release. 
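+// A minimal usage sketch (the release name is illustrative, and cfg is
+// assumed to be an initialized *Configuration):
+//
+//	u := NewUninstall(cfg)
+//	u.Wait = true
+//	res, err := u.Run("my-release")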
+func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) { + if err := u.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if u.DryRun { + // In the dry run case, just see if the release exists + r, err := u.cfg.releaseContent(name, 0) + if err != nil { + return &release.UninstallReleaseResponse{}, err + } + return &release.UninstallReleaseResponse{Release: r}, nil + } + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("uninstall: Release name is invalid: %s", name) + } + + rels, err := u.cfg.Releases.History(name) + if err != nil { + if u.IgnoreNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "uninstall: Release not loaded: %s", name) + } + if len(rels) < 1 { + return nil, errMissingRelease + } + + releaseutil.SortByRevision(rels) + rel := rels[len(rels)-1] + + // TODO: Are there any cases where we want to force a delete even if it's + // already marked deleted? + if rel.Info.Status == release.StatusUninstalled { + if !u.KeepHistory { + if err := u.purgeReleases(rels...); err != nil { + return nil, errors.Wrap(err, "uninstall: Failed to purge the release") + } + return &release.UninstallReleaseResponse{Release: rel}, nil + } + return nil, errors.Errorf("the release named %q is already deleted", name) + } + + u.cfg.Log("uninstall: Deleting %s", name) + rel.Info.Status = release.StatusUninstalling + rel.Info.Deleted = helmtime.Now() + rel.Info.Description = "Deletion in progress (or silently failed)" + res := &release.UninstallReleaseResponse{Release: rel} + + if !u.DisableHooks { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil { + return res, err + } + } else { + u.cfg.Log("delete hooks disabled for %s", name) + } + + // From here on out, the release is currently considered to be in StatusUninstalling + // state. + if err := u.cfg.Releases.Update(rel); err != nil { + u.cfg.Log("uninstall: Failed to store updated release: %s", err) + } + + deletedResources, kept, errs := u.deleteRelease(rel) + if errs != nil { + u.cfg.Log("uninstall: Failed to delete release: %s", errs) + return nil, errors.Errorf("failed to delete release: %s", name) + } + + if kept != "" { + kept = "These resources were kept due to the resource policy:\n" + kept + } + res.Info = kept + + if u.Wait { + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) + } + } + } + + if !u.DisableHooks { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil { + errs = append(errs, err) + } + } + + rel.Info.Status = release.StatusUninstalled + if len(u.Description) > 0 { + rel.Info.Description = u.Description + } else { + rel.Info.Description = "Uninstallation complete" + } + + if !u.KeepHistory { + u.cfg.Log("purge requested for %s", name) + err := u.purgeReleases(rels...) 
+ if err != nil { + errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release")) + } + + // Return the errors that occurred while deleting the release, if any + if len(errs) > 0 { + return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs)) + } + + return res, nil + } + + if err := u.cfg.Releases.Update(rel); err != nil { + u.cfg.Log("uninstall: Failed to store updated release: %s", err) + } + + if len(errs) > 0 { + return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs)) + } + return res, nil +} + +func (u *Uninstall) purgeReleases(rels ...*release.Release) error { + for _, rel := range rels { + if _, err := u.cfg.Releases.Delete(rel.Name, rel.Version); err != nil { + return err + } + } + return nil +} + +func joinErrors(errs []error) string { + es := make([]string, 0, len(errs)) + for _, e := range errs { + es = append(es, e.Error()) + } + return strings.Join(es, "; ") +} + +// deleteRelease deletes the release and returns list of delete resources and manifests that were kept in the deletion process +func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, string, []error) { + var errs []error + + manifests := releaseutil.SplitManifests(rel.Manifest) + _, files, err := releaseutil.SortManifests(manifests, nil, releaseutil.UninstallOrder) + if err != nil { + // We could instead just delete everything in no particular order. + // FIXME: One way to delete at this point would be to try a label-based + // deletion. The problem with this is that we could get a false positive + // and delete something that was not legitimately part of this release. + return nil, rel.Manifest, []error{errors.Wrap(err, "corrupted release record. You must manually delete the resources")} + } + + filesToKeep, filesToDelete := filterManifestsToKeep(files) + var kept string + for _, f := range filesToKeep { + kept += "[" + f.Head.Kind + "] " + f.Head.Metadata.Name + "\n" + } + + var builder strings.Builder + for _, file := range filesToDelete { + builder.WriteString("\n---\n" + file.Content) + } + + resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false) + if err != nil { + return nil, "", []error{errors.Wrap(err, "unable to build kubernetes objects for delete")} + } + if len(resources) > 0 { + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { + _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation)) + return resources, kept, errs + } + _, errs = u.cfg.KubeClient.Delete(resources) + } + return resources, kept, errs +} + +func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation { + switch cascadingFlag { + case "orphan": + return v1.DeletePropagationOrphan + case "foreground": + return v1.DeletePropagationForeground + case "background": + return v1.DeletePropagationBackground + default: + cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag) + return v1.DeletePropagationBackground + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go new file mode 100644 index 000000000000..a08d68495f49 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go @@ -0,0 +1,646 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// Upgrade is the action for upgrading releases. +// +// It provides the implementation of 'helm upgrade'. +type Upgrade struct { + cfg *Configuration + + ChartPathOptions + + // Install is a purely informative flag that indicates whether this upgrade was done in "install" mode. + // + // Applications may use this to determine whether this Upgrade operation was done as part of a + // pure upgrade (Upgrade.Install == false) or as part of an install-or-upgrade operation + // (Upgrade.Install == true). + // + // Setting this to `true` will NOT cause `Upgrade` to perform an install if the release does not exist. + // That process must be handled by creating an Install action directly. See cmd/upgrade.go for an + // example of how this flag is used. + Install bool + // Devel indicates that the operation is done in devel mode. + Devel bool + // Namespace is the namespace in which this operation should be performed. + Namespace string + // SkipCRDs skips installing CRDs when install flag is enabled during upgrade + SkipCRDs bool + // Timeout is the timeout for this operation + Timeout time.Duration + // Wait determines whether the wait operation should be performed after the upgrade is requested. + Wait bool + // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. + WaitForJobs bool + // DisableHooks disables hook processing if set to true. + DisableHooks bool + // DryRun controls whether the operation is prepared, but not executed. + DryRun bool + // DryRunOption controls whether the operation is prepared, but not executed with options on whether or not to interact with the remote cluster. + DryRunOption string + // HideSecret can be set to true when DryRun is enabled in order to hide + // Kubernetes Secrets in the output. It cannot be used outside of DryRun. + HideSecret bool + // Force will, if set to `true`, ignore certain warnings and perform the upgrade anyway. + // + // This should be used with caution. + Force bool + // ResetValues will reset the values to the chart's built-ins rather than merging with existing. + ResetValues bool + // ReuseValues will re-use the user's last supplied values. + ReuseValues bool + // ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values. + ResetThenReuseValues bool + // Recreate will (if true) recreate pods after a rollback. + Recreate bool + // MaxHistory limits the maximum number of revisions saved per release + MaxHistory int + // Atomic, if true, will roll back on failure. 
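+	// Note: RunWithContext forces Wait to true whenever Atomic is set, so the
+	// caller does not have to set both.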
+	Atomic bool
+	// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
+	CleanupOnFail bool
+	// SubNotes determines whether sub-notes are rendered in the chart.
+	SubNotes bool
+	// HideNotes determines whether notes are output during upgrade.
+	HideNotes bool
+	// SkipSchemaValidation determines if JSON schema validation is disabled.
+	SkipSchemaValidation bool
+	// Description is the description of this operation.
+	Description string
+	// Labels are custom labels to apply to the release.
+	Labels map[string]string
+	// PostRenderer is an optional post-renderer.
+	//
+	// If this is non-nil, then after templates are rendered, they will be sent to the
+	// post renderer before sending to the Kubernetes API server.
+	PostRenderer postrender.PostRenderer
+	// DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
+	DisableOpenAPIValidation bool
+	// DependencyUpdate fetches missing chart dependencies before upgrading.
+	DependencyUpdate bool
+	// Lock guards against race conditions when the process receives a SIGTERM.
+	Lock sync.Mutex
+	// EnableDNS enables DNS lookups when rendering templates.
+	EnableDNS bool
+	// TakeOwnership will skip the check for helm annotations and adopt all existing resources.
+	TakeOwnership bool
+}
+
+type resultMessage struct {
+	r *release.Release
+	e error
+}
+
+// NewUpgrade creates a new Upgrade object with the given configuration.
+func NewUpgrade(cfg *Configuration) *Upgrade {
+	up := &Upgrade{
+		cfg: cfg,
+	}
+	up.ChartPathOptions.registryClient = cfg.RegistryClient
+
+	return up
+}
+
+// SetRegistryClient sets the registry client to use when fetching charts.
+func (u *Upgrade) SetRegistryClient(client *registry.Client) {
+	u.ChartPathOptions.registryClient = client
+}
+
+// Run executes the upgrade on the given release.
+func (u *Upgrade) Run(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+	ctx := context.Background()
+	return u.RunWithContext(ctx, name, chart, vals)
+}
+
+// RunWithContext executes the upgrade on the given release with context.
+func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+	if err := u.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	// Make sure that Wait is set if Atomic is set, so the user doesn't have
+	// to specify both.
+	u.Wait = u.Wait || u.Atomic
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("release name is invalid: %s", name)
+	}
+
+	u.cfg.Log("preparing upgrade for %s", name)
+	currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
+	if err != nil {
+		return nil, err
+	}
+
+	u.cfg.Releases.MaxHistory = u.MaxHistory
+
+	u.cfg.Log("performing update for %s", name)
+	res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease)
+	if err != nil {
+		return res, err
+	}
+
+	// Do not update for dry runs
+	if !u.isDryRun() {
+		u.cfg.Log("updating status for upgraded release for %s", name)
+		if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
+			return res, err
+		}
+	}
+
+	return res, nil
+}
+
+// isDryRun reports whether the upgrade is configured to run as a dry run.
+func (u *Upgrade) isDryRun() bool {
+	return u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true"
+}
+
+// prepareUpgrade builds an upgraded release for an upgrade operation.
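+// It returns the current release and the new pending-upgrade release; neither
+// is persisted to storage by this function.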
+func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) { + if chart == nil { + return nil, nil, errMissingChart + } + + // HideSecret must be used with dry run. Otherwise, return an error. + if !u.isDryRun() && u.HideSecret { + return nil, nil, errors.New("Hiding Kubernetes secrets requires a dry-run mode") + } + + // finds the last non-deleted release with the given name + lastRelease, err := u.cfg.Releases.Last(name) + if err != nil { + // to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist + if errors.Is(err, driver.ErrReleaseNotFound) { + return nil, nil, driver.NewErrNoDeployedReleases(name) + } + return nil, nil, err + } + + // Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock. + if lastRelease.Info.Status.IsPending() { + return nil, nil, errPending + } + + var currentRelease *release.Release + if lastRelease.Info.Status == release.StatusDeployed { + // no need to retrieve the last deployed release from storage as the last release is deployed + currentRelease = lastRelease + } else { + // finds the deployed release with the given name + currentRelease, err = u.cfg.Releases.Deployed(name) + if err != nil { + if errors.Is(err, driver.ErrNoDeployedReleases) && + (lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) { + currentRelease = lastRelease + } else { + return nil, nil, err + } + } + } + + // determine if values will be reused + vals, err = u.reuseValues(chart, currentRelease, vals) + if err != nil { + return nil, nil, err + } + + if err := chartutil.ProcessDependenciesWithMerge(chart, vals); err != nil { + return nil, nil, err + } + + // Increment revision count. This is passed to templates, and also stored on + // the release object. + revision := lastRelease.Version + 1 + + options := chartutil.ReleaseOptions{ + Name: name, + Namespace: currentRelease.Namespace, + Revision: revision, + IsUpgrade: true, + } + + caps, err := u.cfg.getCapabilities() + if err != nil { + return nil, nil, err + } + valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation) + if err != nil { + return nil, nil, err + } + + // Determine whether or not to interact with remote + var interactWithRemote bool + if !u.isDryRun() || u.DryRunOption == "server" || u.DryRunOption == "none" || u.DryRunOption == "false" { + interactWithRemote = true + } + + hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithRemote, u.EnableDNS, u.HideSecret) + if err != nil { + return nil, nil, err + } + + if driver.ContainsSystemLabels(u.Labels) { + return nil, nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) + } + + // Store an upgraded release. + upgradedRelease := &release.Release{ + Name: name, + Namespace: currentRelease.Namespace, + Chart: chart, + Config: vals, + Info: &release.Info{ + FirstDeployed: currentRelease.Info.FirstDeployed, + LastDeployed: Timestamper(), + Status: release.StatusPendingUpgrade, + Description: "Preparing upgrade", // This should be overwritten later. 
+ }, + Version: revision, + Manifest: manifestDoc.String(), + Hooks: hooks, + Labels: mergeCustomLabels(lastRelease.Labels, u.Labels), + } + + if len(notesTxt) > 0 { + upgradedRelease.Info.Notes = notesTxt + } + err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation) + return currentRelease, upgradedRelease, err +} + +func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release) (*release.Release, error) { + current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false) + if err != nil { + // Checking for removed Kubernetes API error so can provide a more informative error message to the user + // Ref: https://github.com/helm/helm/issues/7219 + if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") { + return upgradedRelease, errors.Wrap(err, "current release manifest contains removed kubernetes api(s) for this "+ + "kubernetes version and it is therefore unable to build the kubernetes "+ + "objects for performing the diff. error from kubernetes") + } + return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest") + } + target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation) + if err != nil { + return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest") + } + + // It is safe to use force only on target because these are resources currently rendered by the chart. + err = target.Visit(setMetadataVisitor(upgradedRelease.Name, upgradedRelease.Namespace, true)) + if err != nil { + return upgradedRelease, err + } + + // Do a basic diff using gvk + name to figure out what new resources are being created so we can validate they don't already exist + existingResources := make(map[string]bool) + for _, r := range current { + existingResources[objectKey(r)] = true + } + + var toBeCreated kube.ResourceList + for _, r := range target { + if !existingResources[objectKey(r)] { + toBeCreated = append(toBeCreated, r) + } + } + + var toBeUpdated kube.ResourceList + if u.TakeOwnership { + toBeUpdated, err = requireAdoption(toBeCreated) + } else { + toBeUpdated, err = existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace) + } + if err != nil { + return nil, errors.Wrap(err, "Unable to continue with update") + } + + toBeUpdated.Visit(func(r *resource.Info, err error) error { + if err != nil { + return err + } + current.Append(r) + return nil + }) + + // Run if it is a dry run + if u.isDryRun() { + u.cfg.Log("dry run for %s", upgradedRelease.Name) + if len(u.Description) > 0 { + upgradedRelease.Info.Description = u.Description + } else { + upgradedRelease.Info.Description = "Dry run complete" + } + return upgradedRelease, nil + } + + u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name) + if err := u.cfg.Releases.Create(upgradedRelease); err != nil { + return nil, err + } + rChan := make(chan resultMessage) + ctxChan := make(chan resultMessage) + doneChan := make(chan interface{}) + defer close(doneChan) + go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease) + go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease) + select { + case result := <-rChan: + return result.r, result.e + case result := <-ctxChan: + return result.r, result.e + } +} + +// Function used to lock the Mutex, this is important for the case when the atomic flag is set. 
+// In that case the upgrade can finish before the rollback completes, so it is
+// necessary to wait for the rollback to finish.
+// The rollback is triggered by the function failRelease.
+func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) {
+	u.Lock.Lock()
+	if err != nil {
+		rel, err = u.failRelease(rel, created, err)
+	}
+	c <- resultMessage{r: rel, e: err}
+	u.Lock.Unlock()
+}
+
+// handleContext listens for a canceled context (e.g. on SIGINT or SIGTERM).
+func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c chan<- resultMessage, upgradedRelease *release.Release) {
+	select {
+	case <-ctx.Done():
+		err := ctx.Err()
+
+		// When the atomic flag is set, the ongoing release finishes first and
+		// does not leave time for the rollback to happen.
+		u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err)
+	case <-done:
+		return
+	}
+}
+
+func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) {
+	// pre-upgrade hooks
+	if !u.DisableHooks {
+		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+			u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
+			return
+		}
+	} else {
+		u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
+	}
+
+	results, err := u.cfg.KubeClient.Update(current, target, u.Force)
+	if err != nil {
+		u.cfg.recordRelease(originalRelease)
+		u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+		return
+	}
+
+	if u.Recreate {
+		// NOTE: Because this is not critical for a release to succeed, we just
+		// log if an error occurs and continue onward.
If we ever introduce log + // levels, we should make these error level logs so users are notified + // that they'll need to go do the cleanup on their own + if err := recreate(u.cfg, results.Updated); err != nil { + u.cfg.Log(err.Error()) + } + } + + if u.Wait { + u.cfg.Log( + "waiting for release %s resources (created: %d updated: %d deleted: %d)", + upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) + if u.WaitForJobs { + if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } else { + if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } + } + + // post-upgrade hooks + if !u.DisableHooks { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil { + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) + return + } + } + + originalRelease.Info.Status = release.StatusSuperseded + u.cfg.recordRelease(originalRelease) + + upgradedRelease.Info.Status = release.StatusDeployed + if len(u.Description) > 0 { + upgradedRelease.Info.Description = u.Description + } else { + upgradedRelease.Info.Description = "Upgrade complete" + } + u.reportToPerformUpgrade(c, upgradedRelease, nil, nil) +} + +func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { + msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) + u.cfg.Log("warning: %s", msg) + + rel.Info.Status = release.StatusFailed + rel.Info.Description = msg + u.cfg.recordRelease(rel) + if u.CleanupOnFail && len(created) > 0 { + u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created)) + _, errs := u.cfg.KubeClient.Delete(created) + if errs != nil { + var errorList []string + for _, e := range errs { + errorList = append(errorList, e.Error()) + } + return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) + } + u.cfg.Log("Resource cleanup complete") + } + if u.Atomic { + u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release") + + // As a protection, get the last successful release before rollback. + // If there are no successful releases, bail out + hist := NewHistory(u.cfg) + fullHistory, herr := hist.Run(rel.Name) + if herr != nil { + return rel, errors.Wrapf(herr, "an error occurred while finding last successful release. original upgrade error: %s", err) + } + + // There isn't a way to tell if a previous release was successful, but + // generally failed releases do not get superseded unless the next + // release is successful, so this should be relatively safe + filteredHistory := releaseutil.FilterFunc(func(r *release.Release) bool { + return r.Info.Status == release.StatusSuperseded || r.Info.Status == release.StatusDeployed + }).Filter(fullHistory) + if len(filteredHistory) == 0 { + return rel, errors.Wrap(err, "unable to find a previously successful release when attempting to rollback. 
original upgrade error") + } + + releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision) + + rollin := NewRollback(u.cfg) + rollin.Version = filteredHistory[0].Version + rollin.Wait = true + rollin.WaitForJobs = u.WaitForJobs + rollin.DisableHooks = u.DisableHooks + rollin.Recreate = u.Recreate + rollin.Force = u.Force + rollin.Timeout = u.Timeout + if rollErr := rollin.Run(rel.Name); rollErr != nil { + return rel, errors.Wrapf(rollErr, "an error occurred while rolling back the release. original upgrade error: %s", err) + } + return rel, errors.Wrapf(err, "release %s failed, and has been rolled back due to atomic being set", rel.Name) + } + + return rel, err +} + +// reuseValues copies values from the current release to a new release if the +// new release does not have any values. +// +// If the request already has values, or if there are no values in the current +// release, this does nothing. +// +// This is skipped if the u.ResetValues flag is set, in which case the +// request values are not altered. +func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { + if u.ResetValues { + // If ResetValues is set, we completely ignore current.Config. + u.cfg.Log("resetting values to the chart's original version") + return newVals, nil + } + + // If the ReuseValues flag is set, we always copy the old values over the new config's values. + if u.ReuseValues { + u.cfg.Log("reusing the old release's values") + + // We have to regenerate the old coalesced values: + oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) + if err != nil { + return nil, errors.Wrap(err, "failed to rebuild old values") + } + + newVals = chartutil.CoalesceTables(newVals, current.Config) + + chart.Values = oldVals + + return newVals, nil + } + + // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. + if u.ResetThenReuseValues { + u.cfg.Log("merging values from old release to new values") + + newVals = chartutil.CoalesceTables(newVals, current.Config) + + return newVals, nil + } + + if len(newVals) == 0 && len(current.Config) > 0 { + u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version) + newVals = current.Config + } + return newVals, nil +} + +func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool) error { + _, err := c.Build(bytes.NewReader(manifest), openAPIValidation) + return err +} + +// recreate captures all the logic for recreating pods for both upgrade and +// rollback. If we end up refactoring rollback to use upgrade, this can just be +// made an unexported method on the upgrade action. 
+func recreate(cfg *Configuration, resources kube.ResourceList) error {
+	for _, res := range resources {
+		versioned := kube.AsVersioned(res)
+		selector, err := kube.SelectorsForObject(versioned)
+		if err != nil {
+			// If no selector is returned, it means this object is
+			// definitely not a pod, so continue onward
+			continue
+		}
+
+		client, err := cfg.KubernetesClientSet()
+		if err != nil {
+			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+		}
+
+		pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
+			LabelSelector: selector.String(),
+		})
+		if err != nil {
+			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+		}
+
+		// Restart pods
+		for _, pod := range pods.Items {
+			// Delete each pod so it gets restarted with the changed spec.
+			if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
+				return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+			}
+		}
+	}
+	return nil
+}
+
+// objectKey returns a group/version/kind/namespace/name key that uniquely
+// identifies a resource.
+func objectKey(r *resource.Info) string {
+	gvk := r.Object.GetObjectKind().GroupVersionKind()
+	return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
+}
+
+// mergeCustomLabels merges the desired labels over the current ones and drops
+// any key whose value is the string "null".
+func mergeCustomLabels(current, desired map[string]string) map[string]string {
+	labels := mergeStrStrMaps(current, desired)
+	for k, v := range labels {
+		if v == "null" {
+			delete(labels, k)
+		}
+	}
+	return labels
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/validate.go b/vendor/helm.sh/helm/v3/pkg/action/validate.go
new file mode 100644
index 000000000000..127e9bf96237
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/validate.go
@@ -0,0 +1,209 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/resource"
+
+	"helm.sh/helm/v3/pkg/kube"
+)
+
+var accessor = meta.NewAccessor()
+
+const (
+	appManagedByLabel              = "app.kubernetes.io/managed-by"
+	appManagedByHelm               = "Helm"
+	helmReleaseNameAnnotation      = "meta.helm.sh/release-name"
+	helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
+)
+
+// requireAdoption returns the subset of resources that already exist in the cluster.
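+// Unlike existingResourceConflict, it performs no ownership check, so the
+// resources are adopted unconditionally; it is used when TakeOwnership is set.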
+func requireAdoption(resources kube.ResourceList) (kube.ResourceList, error) { + var requireUpdate kube.ResourceList + + err := resources.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + helper := resource.NewHelper(info.Client, info.Mapping) + _, err = helper.Get(info.Namespace, info.Name) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info)) + } + + requireUpdate.Append(info) + return nil + }) + + return requireUpdate, err +} + +func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) { + var requireUpdate kube.ResourceList + + err := resources.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + helper := resource.NewHelper(info.Client, info.Mapping) + existing, err := helper.Get(info.Namespace, info.Name) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info)) + } + + // Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace. + if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil { + return fmt.Errorf("%s exists and cannot be imported into the current release: %s", resourceString(info), err) + } + + requireUpdate.Append(info) + return nil + }) + + return requireUpdate, err +} + +func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error { + lbls, err := accessor.Labels(obj) + if err != nil { + return err + } + annos, err := accessor.Annotations(obj) + if err != nil { + return err + } + + var errs []error + if err := requireValue(lbls, appManagedByLabel, appManagedByHelm); err != nil { + errs = append(errs, fmt.Errorf("label validation error: %s", err)) + } + if err := requireValue(annos, helmReleaseNameAnnotation, releaseName); err != nil { + errs = append(errs, fmt.Errorf("annotation validation error: %s", err)) + } + if err := requireValue(annos, helmReleaseNamespaceAnnotation, releaseNamespace); err != nil { + errs = append(errs, fmt.Errorf("annotation validation error: %s", err)) + } + + if len(errs) > 0 { + err := errors.New("invalid ownership metadata") + for _, e := range errs { + err = fmt.Errorf("%w; %s", err, e) + } + return err + } + + return nil +} + +func requireValue(meta map[string]string, k, v string) error { + actual, ok := meta[k] + if !ok { + return fmt.Errorf("missing key %q: must be set to %q", k, v) + } + if actual != v { + return fmt.Errorf("key %q must equal %q: current value is %q", k, v, actual) + } + return nil +} + +// setMetadataVisitor adds release tracking metadata to all resources. If force is enabled, existing +// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an +// existing and conflicting value for the managed by label or Helm release/namespace annotations. 
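+// A minimal usage sketch (the release name and namespace are illustrative):
+//
+//	err := target.Visit(setMetadataVisitor("my-release", "default", true))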
+func setMetadataVisitor(releaseName, releaseNamespace string, force bool) resource.VisitorFunc { + return func(info *resource.Info, err error) error { + if err != nil { + return err + } + + if !force { + if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil { + return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err) + } + } + + if err := mergeLabels(info.Object, map[string]string{ + appManagedByLabel: appManagedByHelm, + }); err != nil { + return fmt.Errorf( + "%s labels could not be updated: %s", + resourceString(info), err, + ) + } + + if err := mergeAnnotations(info.Object, map[string]string{ + helmReleaseNameAnnotation: releaseName, + helmReleaseNamespaceAnnotation: releaseNamespace, + }); err != nil { + return fmt.Errorf( + "%s annotations could not be updated: %s", + resourceString(info), err, + ) + } + + return nil + } +} + +func resourceString(info *resource.Info) string { + _, k := info.Mapping.GroupVersionKind.ToAPIVersionAndKind() + return fmt.Sprintf( + "%s %q in namespace %q", + k, info.Name, info.Namespace, + ) +} + +func mergeLabels(obj runtime.Object, labels map[string]string) error { + current, err := accessor.Labels(obj) + if err != nil { + return err + } + return accessor.SetLabels(obj, mergeStrStrMaps(current, labels)) +} + +func mergeAnnotations(obj runtime.Object, annotations map[string]string) error { + current, err := accessor.Annotations(obj) + if err != nil { + return err + } + return accessor.SetAnnotations(obj, mergeStrStrMaps(current, annotations)) +} + +// merge two maps, always taking the value on the right +func mergeStrStrMaps(current, desired map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range current { + result[k] = v + } + for k, desiredVal := range desired { + result[k] = desiredVal + } + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/verify.go b/vendor/helm.sh/helm/v3/pkg/action/verify.go new file mode 100644 index 000000000000..f3623949620a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/verify.go @@ -0,0 +1,59 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "fmt" + "strings" + + "helm.sh/helm/v3/pkg/downloader" +) + +// Verify is the action for building a given chart's Verify tree. +// +// It provides the implementation of 'helm verify'. +type Verify struct { + Keyring string + Out string +} + +// NewVerify creates a new Verify object with the given configuration. +func NewVerify() *Verify { + return &Verify{} +} + +// Run executes 'helm verify'. 
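+// A minimal usage sketch (the keyring and chart paths are illustrative):
+//
+//	v := NewVerify()
+//	v.Keyring = "pubring.gpg"
+//	if err := v.Run("mychart-0.1.0.tgz"); err == nil {
+//		fmt.Print(v.Out) // the verification details
+//	}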
+func (v *Verify) Run(chartfile string) error {
+	var out strings.Builder
+	p, err := downloader.VerifyChart(chartfile, v.Keyring)
+	if err != nil {
+		return err
+	}
+
+	for name := range p.SignedBy.Identities {
+		fmt.Fprintf(&out, "Signed by: %v\n", name)
+	}
+	fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", p.SignedBy.PrimaryKey.Fingerprint)
+	fmt.Fprintf(&out, "Chart Hash Verified: %s\n", p.FileHash)
+
+	// TODO(mattfarina): The output is set as a property rather than returned
+	// to maintain the Go API. In Helm v4 this function should return the out
+	// and the property on the struct can be removed.
+	v.Out = out.String()
+
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/chart.go b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
new file mode 100644
index 000000000000..a3bed63a38aa
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
@@ -0,0 +1,173 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// APIVersionV1 is the API version number for version 1.
+const APIVersionV1 = "v1"
+
+// APIVersionV2 is the API version number for version 2.
+const APIVersionV2 = "v2"
+
+// aliasNameFormat defines the characters that are legal in an alias name.
+var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+// Chart is a helm package that contains metadata, a default config, zero or more
+// optionally parameterizable templates, and zero or more charts (dependencies).
+type Chart struct {
+	// Raw contains the raw contents of the files originally contained in the chart archive.
+	//
+	// This should not be used except in special cases like `helm show values`,
+	// where we want to display the raw values, comments and all.
+	Raw []*File `json:"-"`
+	// Metadata is the contents of the Chartfile.
+	Metadata *Metadata `json:"metadata"`
+	// Lock is the contents of Chart.lock.
+	Lock *Lock `json:"lock"`
+	// Templates for this chart.
+	Templates []*File `json:"templates"`
+	// Values are default config for this chart.
+	Values map[string]interface{} `json:"values"`
+	// Schema is an optional JSON schema for imposing structure on Values
+	Schema []byte `json:"schema"`
+	// Files are miscellaneous files in a chart archive,
+	// e.g. README, LICENSE, etc.
+	Files []*File `json:"files"`
+
+	parent       *Chart
+	dependencies []*Chart
+}
+
+type CRD struct {
+	// Name is the File.Name for the crd file
+	Name string
+	// Filename is the File's Name prefixed with the (sub-)chart's ChartFullPath
+	Filename string
+	// File is the File obj for the crd
+	File *File
+}
+
+// SetDependencies replaces the chart dependencies.
+func (ch *Chart) SetDependencies(charts ...*Chart) {
+	ch.dependencies = nil
+	ch.AddDependency(charts...)
+}
+
+// Name returns the name of the chart.
+func (ch *Chart) Name() string {
+	if ch.Metadata == nil {
+		return ""
+	}
+	return ch.Metadata.Name
+}
+
+// AddDependency adds one or more charts to this chart as dependencies,
+// marking them as subcharts of this chart.
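+//
+// An illustrative sketch (hypothetical chart names, not part of the original
+// source):
+//
+//	parent := &Chart{Metadata: &Metadata{Name: "parent"}}
+//	child := &Chart{Metadata: &Metadata{Name: "child"}}
+//	parent.AddDependency(child)
+//	// child.Parent() == parent
+//	// child.ChartFullPath() == "parent/charts/child"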
+func (ch *Chart) AddDependency(charts ...*Chart) { + for i, x := range charts { + charts[i].parent = ch + ch.dependencies = append(ch.dependencies, x) + } +} + +// Root finds the root chart. +func (ch *Chart) Root() *Chart { + if ch.IsRoot() { + return ch + } + return ch.Parent().Root() +} + +// Dependencies are the charts that this chart depends on. +func (ch *Chart) Dependencies() []*Chart { return ch.dependencies } + +// IsRoot determines if the chart is the root chart. +func (ch *Chart) IsRoot() bool { return ch.parent == nil } + +// Parent returns a subchart's parent chart. +func (ch *Chart) Parent() *Chart { return ch.parent } + +// ChartPath returns the full path to this chart in dot notation. +func (ch *Chart) ChartPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartPath() + "." + ch.Name() + } + return ch.Name() +} + +// ChartFullPath returns the full path to this chart. +func (ch *Chart) ChartFullPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartFullPath() + "/charts/" + ch.Name() + } + return ch.Name() +} + +// Validate validates the metadata. +func (ch *Chart) Validate() error { + return ch.Metadata.Validate() +} + +// AppVersion returns the appversion of the chart. +func (ch *Chart) AppVersion() string { + if ch.Metadata == nil { + return "" + } + return ch.Metadata.AppVersion +} + +// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart. +// Deprecated: use CRDObjects() +func (ch *Chart) CRDs() []*File { + files := []*File{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + files = append(files, f) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + files = append(files, dep.CRDs()...) + } + return files +} + +// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts +func (ch *Chart) CRDObjects() []CRD { + crds := []CRD{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f} + crds = append(crds, mycrd) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + crds = append(crds, dep.CRDObjects()...) + } + return crds +} + +func hasManifestExtension(fname string) bool { + ext := filepath.Ext(fname) + return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json") +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go new file mode 100644 index 000000000000..eda0f5a89d35 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import "time" + +// Dependency describes a chart upon which another chart depends. 
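+// In Chart.yaml (apiVersion v2), a dependency entry looks like the following
+// illustrative sketch (hypothetical values, not part of the original source):
+//
+//	dependencies:
+//	  - name: postgresql
+//	    version: ">=12.0.0 <13.0.0"
+//	    repository: https://charts.example.com
+//	    condition: postgresql.enabled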
+//
+// Dependencies can be used to express developer intent, or to capture the state
+// of a chart.
+type Dependency struct {
+	// Name is the name of the dependency.
+	//
+	// This must match the name in the dependency's Chart.yaml.
+	Name string `json:"name" yaml:"name"`
+	// Version is the version (range) of this chart.
+	//
+	// A lock file will always produce a single version, while a dependency
+	// may contain a semantic version range.
+	Version string `json:"version,omitempty" yaml:"version,omitempty"`
+	// The URL to the repository.
+	//
+	// Appending `index.yaml` to this string should result in a URL that can be
+	// used to fetch the repository index.
+	Repository string `json:"repository" yaml:"repository"`
+	// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled)
+	Condition string `json:"condition,omitempty" yaml:"condition,omitempty"`
+	// Tags can be used to group charts for enabling/disabling together
+	Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+	// Enabled determines if the chart should be loaded
+	Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
+	// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
+	// string or pair of child/parent sublist items.
+	ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"`
+	// Alias is a usable alias for the chart
+	Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
+}
+
+// Validate checks for common problems with the dependency data structure in
+// the chart. This check must be done at load time before the dependency's charts are
+// loaded.
+func (d *Dependency) Validate() error {
+	if d == nil {
+		return ValidationError("dependencies must not contain empty or null nodes")
+	}
+	d.Name = sanitizeString(d.Name)
+	d.Version = sanitizeString(d.Version)
+	d.Repository = sanitizeString(d.Repository)
+	d.Condition = sanitizeString(d.Condition)
+	for i := range d.Tags {
+		d.Tags[i] = sanitizeString(d.Tags[i])
+	}
+	if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
+		return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
+	}
+	return nil
+}
+
+// Lock is a lock file for dependencies.
+//
+// It represents the state that the dependencies should be in.
type Lock struct {
+	// Generated is the date the lock file was last generated.
+	Generated time.Time `json:"generated"`
+	// Digest is a hash of the dependencies in Chart.yaml.
+	Digest string `json:"digest"`
+	// Dependencies is the list of dependencies that this lock file has locked.
+	Dependencies []*Dependency `json:"dependencies"`
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/errors.go b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
new file mode 100644
index 000000000000..2fad5f37081b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package chart + +import "fmt" + +// ValidationError represents a data validation error. +type ValidationError string + +func (v ValidationError) Error() string { + return "validation: " + string(v) +} + +// ValidationErrorf takes a message and formatting options and creates a ValidationError +func ValidationErrorf(msg string, args ...interface{}) ValidationError { + return ValidationError(fmt.Sprintf(msg, args...)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/file.go b/vendor/helm.sh/helm/v3/pkg/chart/file.go new file mode 100644 index 000000000000..9dd7c08d5255 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/file.go @@ -0,0 +1,27 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +// File represents a file as a name/value pair. +// +// By convention, name is a relative path within the scope of the chart's +// base directory. +type File struct { + // Name is the path-like name of the template. + Name string `json:"name"` + // Data is the template as byte data. + Data []byte `json:"data"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go new file mode 100644 index 000000000000..6272a564f8d7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go @@ -0,0 +1,235 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +// MaxDecompressedChartSize is the maximum size of a chart archive that will be +// decompressed. This is the decompressed size of all the files. +// The default value is 100 MiB. +var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB + +// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load. +// The size of the file is the decompressed version of it when it is stored in an archive. +var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB + +var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) + +// FileLoader loads a chart from a file +type FileLoader string + +// Load loads a chart +func (l FileLoader) Load() (*chart.Chart, error) { + return LoadFile(string(l)) +} + +// LoadFile loads from an archive file. 
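+//
+// A hypothetical call (illustrative path, not part of the original source);
+// note that LoadFile enforces MaxDecompressedChartSize and
+// MaxDecompressedFileSize while unpacking:
+//
+//	c, err := loader.LoadFile("mychart-0.1.0.tgz")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(c.Name())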
+func LoadFile(name string) (*chart.Chart, error) {
+	if fi, err := os.Stat(name); err != nil {
+		return nil, err
+	} else if fi.IsDir() {
+		return nil, errors.New("cannot load a directory")
+	}
+
+	raw, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer raw.Close()
+
+	err = ensureArchive(name, raw)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := LoadArchive(raw)
+	if err != nil {
+		if err == gzip.ErrHeader {
+			return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err)
+		}
+	}
+	return c, err
+}
+
+// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.
+//
+// Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence
+// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error
+// if we didn't check for this.
+func ensureArchive(name string, raw *os.File) error {
+	defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed.
+
+	// Check the file format to give us a chance to provide the user with more actionable feedback.
+	buffer := make([]byte, 512)
+	_, err := raw.Read(buffer)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("file '%s' cannot be read: %s", name, err)
+	}
+
+	// Helm may identify an archive of type application/x-gzip as application/vnd.ms-fontobject.
+	// Fix for: https://github.com/helm/helm/issues/12261
+	if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" && !isGZipApplication(buffer) {
+		// TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide
+		// variety of content (Makefile, .zshrc) as valid YAML without errors.
+
+		// Wrong content type. Let's check if it's yaml and give an extra hint?
+		if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
+			return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name)
+		}
+		return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType)
+	}
+	return nil
+}
+
+// isGZipApplication checks whether the archive is of the application/x-gzip type.
+func isGZipApplication(data []byte) bool {
+	sig := []byte("\x1F\x8B\x08")
+	return bytes.HasPrefix(data, sig)
+}
+
+// LoadArchiveFiles reads in files out of an archive into memory. This function
+// performs important path security checks and should always be used before
+// expanding a tarball.
+func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
+	unzipped, err := gzip.NewReader(in)
+	if err != nil {
+		return nil, err
+	}
+	defer unzipped.Close()
+
+	files := []*BufferedFile{}
+	tr := tar.NewReader(unzipped)
+	remainingSize := MaxDecompressedChartSize
+	for {
+		b := bytes.NewBuffer(nil)
+		hd, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		if hd.FileInfo().IsDir() {
+			// Use this instead of hd.Typeflag because we don't have to do any
+			// inference chasing.
+			continue
+		}
+
+		switch hd.Typeflag {
+		// We don't want to process these extension header files.
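+		// (tar.TypeXGlobalHeader and tar.TypeXHeader are PAX extended-header
+		// records: they carry archive metadata rather than chart file content.)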
+ case tar.TypeXGlobalHeader, tar.TypeXHeader: + continue + } + + // Archive could contain \ if generated on Windows + delimiter := "/" + if strings.ContainsRune(hd.Name, '\\') { + delimiter = "\\" + } + + parts := strings.Split(hd.Name, delimiter) + n := strings.Join(parts[1:], delimiter) + + // Normalize the path to the / delimiter + n = strings.ReplaceAll(n, delimiter, "/") + + if path.IsAbs(n) { + return nil, errors.New("chart illegally contains absolute paths") + } + + n = path.Clean(n) + if n == "." { + // In this case, the original path was relative when it should have been absolute. + return nil, errors.Errorf("chart illegally contains content outside the base directory: %q", hd.Name) + } + if strings.HasPrefix(n, "..") { + return nil, errors.New("chart illegally references parent directory") + } + + // In some particularly arcane acts of path creativity, it is possible to intermix + // UNIX and Windows style paths in such a way that you produce a result of the form + // c:/foo even after all the built-in absolute path checks. So we explicitly check + // for this condition. + if drivePathPattern.MatchString(n) { + return nil, errors.New("chart contains illegally named files") + } + + if parts[0] == "Chart.yaml" { + return nil, errors.New("chart yaml not in base directory") + } + + if hd.Size > remainingSize { + return nil, fmt.Errorf("decompressed chart is larger than the maximum file size %d", MaxDecompressedChartSize) + } + + if hd.Size > MaxDecompressedFileSize { + return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize) + } + + limitedReader := io.LimitReader(tr, remainingSize) + + bytesWritten, err := io.Copy(b, limitedReader) + if err != nil { + return nil, err + } + + remainingSize -= bytesWritten + // When the bytesWritten are less than the file size it means the limit reader ended + // copying early. Here we report that error. This is important if the last file extracted + // is the one that goes over the limit. It assumes the Size stored in the tar header + // is correct, something many applications do. + if bytesWritten < hd.Size || remainingSize <= 0 { + return nil, fmt.Errorf("decompressed chart is larger than the maximum file size %d", MaxDecompressedChartSize) + } + + data := bytes.TrimPrefix(b.Bytes(), utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + b.Reset() + } + + if len(files) == 0 { + return nil, errors.New("no files in chart archive") + } + return files, nil +} + +// LoadArchive loads from a reader containing a compressed tar archive. +func LoadArchive(in io.Reader) (*chart.Chart, error) { + files, err := LoadArchiveFiles(in) + if err != nil { + return nil, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go new file mode 100644 index 000000000000..fd8e02e1ac9e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go @@ -0,0 +1,123 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/sympath" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/ignore" +) + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +// DirLoader loads a chart from a directory +type DirLoader string + +// Load loads the chart +func (l DirLoader) Load() (*chart.Chart, error) { + return LoadDir(string(l)) +} + +// LoadDir loads from a directory. +// +// This loads charts only from directories. +func LoadDir(dir string) (*chart.Chart, error) { + topdir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + + // Just used for errors. + c := &chart.Chart{} + + rules := ignore.Empty() + ifile := filepath.Join(topdir, ignore.HelmIgnore) + if _, err := os.Stat(ifile); err == nil { + r, err := ignore.ParseFile(ifile) + if err != nil { + return c, err + } + rules = r + } + rules.AddDefaults() + + files := []*BufferedFile{} + topdir += string(filepath.Separator) + + walk := func(name string, fi os.FileInfo, err error) error { + n := strings.TrimPrefix(name, topdir) + if n == "" { + // No need to process top level. Avoid bug with helmignore .* matching + // empty names. See issue 1779. + return nil + } + + // Normalize to / since it will also work on Windows + n = filepath.ToSlash(n) + + if err != nil { + return err + } + if fi.IsDir() { + // Directory-based ignore rules should involve skipping the entire + // contents of that directory. + if rules.Ignore(n, fi) { + return filepath.SkipDir + } + return nil + } + + // If a .helmignore file matches, skip this file. + if rules.Ignore(n, fi) { + return nil + } + + // Irregular files include devices, sockets, and other uses of files that + // are not regular files. In Go they have a file mode type bit set. + // See https://golang.org/pkg/os/#FileMode for examples. + if !fi.Mode().IsRegular() { + return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) + } + + if fi.Size() > MaxDecompressedFileSize { + return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize) + } + + data, err := os.ReadFile(name) + if err != nil { + return errors.Wrapf(err, "error reading %s", n) + } + + data = bytes.TrimPrefix(data, utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + return nil + } + if err = sympath.Walk(topdir, walk); err != nil { + return c, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go new file mode 100644 index 000000000000..622dac4b83dd --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go @@ -0,0 +1,207 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package loader + +import ( + "bytes" + "encoding/json" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ChartLoader loads a chart. +type ChartLoader interface { + Load() (*chart.Chart, error) +} + +// Loader returns a new ChartLoader appropriate for the given chart name +func Loader(name string) (ChartLoader, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if fi.IsDir() { + return DirLoader(name), nil + } + return FileLoader(name), nil + +} + +// Load takes a string name, tries to resolve it to a file or directory, and then loads it. +// +// This is the preferred way to load a chart. It will discover the chart encoding +// and hand off to the appropriate chart reader. +// +// If a .helmignore file is present, the directory loader will skip loading any files +// matching it. But .helmignore is not evaluated when reading out of an archive. +func Load(name string) (*chart.Chart, error) { + l, err := Loader(name) + if err != nil { + return nil, err + } + return l.Load() +} + +// BufferedFile represents an archive file buffered for later processing. +type BufferedFile struct { + Name string + Data []byte +} + +// LoadFiles loads from in-memory files. +func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { + c := new(chart.Chart) + subcharts := make(map[string][]*BufferedFile) + + // do not rely on assumed ordering of files in the chart and crash + // if Chart.yaml was not coming early enough to initialize metadata + for _, f := range files { + c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data}) + if f.Name == "Chart.yaml" { + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load Chart.yaml") + } + // NOTE(bacongobbler): while the chart specification says that APIVersion must be set, + // Helm 2 accepted charts that did not provide an APIVersion in their chart metadata. + // Because of that, if APIVersion is unset, we should assume we're loading a v1 chart. + if c.Metadata.APIVersion == "" { + c.Metadata.APIVersion = chart.APIVersionV1 + } + } + } + for _, f := range files { + switch { + case f.Name == "Chart.yaml": + // already processed + continue + case f.Name == "Chart.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load Chart.lock") + } + case f.Name == "values.yaml": + c.Values = make(map[string]interface{}) + if err := yaml.Unmarshal(f.Data, &c.Values, func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + }); err != nil { + return c, errors.Wrap(err, "cannot load values.yaml") + } + case f.Name == "values.schema.json": + c.Schema = f.Data + + // Deprecated: requirements.yaml is deprecated use Chart.yaml. + // We will handle it for you because we are nice people + case f.Name == "requirements.yaml": + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion != chart.APIVersionV1 { + log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". 
We recommend migrating dependencies to Chart.yaml.") + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load requirements.yaml") + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + // Deprecated: requirements.lock is deprecated use Chart.lock. + case f.Name == "requirements.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load requirements.lock") + } + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion != chart.APIVersionV1 { + log.Printf("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". We recommend migrating to Chart.lock.") + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + + case strings.HasPrefix(f.Name, "templates/"): + c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data}) + case strings.HasPrefix(f.Name, "charts/"): + if filepath.Ext(f.Name) == ".prov" { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + continue + } + + fname := strings.TrimPrefix(f.Name, "charts/") + cname := strings.SplitN(fname, "/", 2)[0] + subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data}) + default: + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + } + + if c.Metadata == nil { + return c, errors.New("Chart.yaml file is missing") + } + + if err := c.Validate(); err != nil { + return c, err + } + + for n, files := range subcharts { + var sc *chart.Chart + var err error + switch { + case strings.IndexAny(n, "_.") == 0: + continue + case filepath.Ext(n) == ".tgz": + file := files[0] + if file.Name != n { + return c, errors.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name) + } + // Untar the chart and add to c.Dependencies + sc, err = LoadArchive(bytes.NewBuffer(file.Data)) + default: + // We have to trim the prefix off of every file, and ignore any file + // that is in charts/, but isn't actually a chart. + buff := make([]*BufferedFile, 0, len(files)) + for _, f := range files { + parts := strings.SplitN(f.Name, "/", 2) + if len(parts) < 2 { + continue + } + f.Name = parts[1] + buff = append(buff, f) + } + sc, err = LoadFiles(buff) + } + + if err != nil { + return c, errors.Wrapf(err, "error unpacking subchart %s in %s", n, c.Name()) + } + c.AddDependency(sc) + } + + return c, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go new file mode 100644 index 000000000000..a08a97cd1dbc --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -0,0 +1,178 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package chart
+
+import (
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"github.com/Masterminds/semver/v3"
+)
+
+// Maintainer describes a Chart maintainer.
+type Maintainer struct {
+	// Name is a user name or organization name
+	Name string `json:"name,omitempty"`
+	// Email is an optional email address to contact the named maintainer
+	Email string `json:"email,omitempty"`
+	// URL is an optional URL to an address for the named maintainer
+	URL string `json:"url,omitempty"`
+}
+
+// Validate checks the maintainer data and sanitizes string characters.
+func (m *Maintainer) Validate() error {
+	if m == nil {
+		return ValidationError("maintainers must not contain empty or null nodes")
+	}
+	m.Name = sanitizeString(m.Name)
+	m.Email = sanitizeString(m.Email)
+	m.URL = sanitizeString(m.URL)
+	return nil
+}
+
+// Metadata for a Chart file. This models the structure of a Chart.yaml file.
+type Metadata struct {
+	// The name of the chart. Required.
+	Name string `json:"name,omitempty"`
+	// The URL to a relevant project page, git repo, or contact person
+	Home string `json:"home,omitempty"`
+	// Sources are the URLs to the source code of this chart
+	Sources []string `json:"sources,omitempty"`
+	// A SemVer 2 conformant version string of the chart. Required.
+	Version string `json:"version,omitempty"`
+	// A one-sentence description of the chart
+	Description string `json:"description,omitempty"`
+	// A list of string keywords
+	Keywords []string `json:"keywords,omitempty"`
+	// A list of name and URL/email address combinations for the maintainer(s)
+	Maintainers []*Maintainer `json:"maintainers,omitempty"`
+	// The URL to an icon file.
+	Icon string `json:"icon,omitempty"`
+	// The API Version of this chart. Required.
+	APIVersion string `json:"apiVersion,omitempty"`
+	// The condition to check to enable chart
+	Condition string `json:"condition,omitempty"`
+	// The tags to check to enable chart
+	Tags string `json:"tags,omitempty"`
+	// The version of the application enclosed inside of this chart.
+	AppVersion string `json:"appVersion,omitempty"`
+	// Whether or not this chart is deprecated
+	Deprecated bool `json:"deprecated,omitempty"`
+	// Annotations are additional mappings uninterpreted by Helm,
+	// made available for inspection by other applications.
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
+	KubeVersion string `json:"kubeVersion,omitempty"`
+	// Dependencies are a list of dependencies for a chart.
+	Dependencies []*Dependency `json:"dependencies,omitempty"`
+	// Specifies the chart type: application or library
+	Type string `json:"type,omitempty"`
+}
+
+// Validate checks the metadata for known issues and sanitizes string
+// characters.
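+//
+// In particular (summarizing the checks below): apiVersion, name, and a valid
+// SemVer version are required, the chart type must be "application" or
+// "library" (or empty), and dependency names/aliases must be unique.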
+func (md *Metadata) Validate() error {
+	if md == nil {
+		return ValidationError("chart.metadata is required")
+	}
+
+	md.Name = sanitizeString(md.Name)
+	md.Description = sanitizeString(md.Description)
+	md.Home = sanitizeString(md.Home)
+	md.Icon = sanitizeString(md.Icon)
+	md.Condition = sanitizeString(md.Condition)
+	md.Tags = sanitizeString(md.Tags)
+	md.AppVersion = sanitizeString(md.AppVersion)
+	md.KubeVersion = sanitizeString(md.KubeVersion)
+	for i := range md.Sources {
+		md.Sources[i] = sanitizeString(md.Sources[i])
+	}
+	for i := range md.Keywords {
+		md.Keywords[i] = sanitizeString(md.Keywords[i])
+	}
+
+	if md.APIVersion == "" {
+		return ValidationError("chart.metadata.apiVersion is required")
+	}
+	if md.Name == "" {
+		return ValidationError("chart.metadata.name is required")
+	}
+
+	if md.Name != filepath.Base(md.Name) {
+		return ValidationErrorf("chart.metadata.name %q is invalid", md.Name)
+	}
+
+	if md.Version == "" {
+		return ValidationError("chart.metadata.version is required")
+	}
+	if !isValidSemver(md.Version) {
+		return ValidationErrorf("chart.metadata.version %q is invalid", md.Version)
+	}
+	if !isValidChartType(md.Type) {
+		return ValidationError("chart.metadata.type must be application or library")
+	}
+
+	for _, m := range md.Maintainers {
+		if err := m.Validate(); err != nil {
+			return err
+		}
+	}
+
+	// Aliases need to be validated here to make sure that the alias name does
+	// not contain any illegal characters.
+	dependencies := map[string]*Dependency{}
+	for _, dependency := range md.Dependencies {
+		if err := dependency.Validate(); err != nil {
+			return err
+		}
+		key := dependency.Name
+		if dependency.Alias != "" {
+			key = dependency.Alias
+		}
+		if dependencies[key] != nil {
+			return ValidationErrorf("more than one dependency with name or alias %q", key)
+		}
+		dependencies[key] = dependency
+	}
+	return nil
+}
+
+func isValidChartType(in string) bool {
+	switch in {
+	case "", "application", "library":
+		return true
+	}
+	return false
+}
+
+func isValidSemver(v string) bool {
+	_, err := semver.NewVersion(v)
+	return err == nil
+}
+
+// sanitizeString normalizes spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+	return strings.Map(func(r rune) rune {
+		if unicode.IsSpace(r) {
+			return ' '
+		}
+		if unicode.IsPrint(r) {
+			return r
+		}
+		return -1
+	}, str)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
new file mode 100644
index 000000000000..48fab0ea40d2
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go
@@ -0,0 +1,126 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/Masterminds/semver/v3"
+	"k8s.io/client-go/kubernetes/scheme"
+
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	helmversion "helm.sh/helm/v3/internal/version"
+)
+
+var (
+	// The Kubernetes version can be set by LDFLAGS. In order to do that the value
+	// must be a string.
+	k8sVersionMajor = "1"
+	k8sVersionMinor = "20"
+
+	// DefaultVersionSet is the default version set, which includes all of the
+	// Kubernetes API versions registered in the client-go scheme (see
+	// allKnownVersions below).
+	DefaultVersionSet = allKnownVersions()
+
+	// DefaultCapabilities is the default set of capabilities.
+	DefaultCapabilities = &Capabilities{
+		KubeVersion: KubeVersion{
+			Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor),
+			Major:   k8sVersionMajor,
+			Minor:   k8sVersionMinor,
+		},
+		APIVersions: DefaultVersionSet,
+		HelmVersion: helmversion.Get(),
+	}
+)
+
+// Capabilities describes the capabilities of the Kubernetes cluster.
+type Capabilities struct {
+	// KubeVersion is the Kubernetes version.
+	KubeVersion KubeVersion
+	// APIVersions are supported Kubernetes API versions.
+	APIVersions VersionSet
+	// HelmVersion is the build information for this helm version
+	HelmVersion helmversion.BuildInfo
+}
+
+func (capabilities *Capabilities) Copy() *Capabilities {
+	return &Capabilities{
+		KubeVersion: capabilities.KubeVersion,
+		APIVersions: capabilities.APIVersions,
+		HelmVersion: capabilities.HelmVersion,
+	}
+}
+
+// KubeVersion is the Kubernetes version.
+type KubeVersion struct {
+	Version string // Kubernetes version
+	Major   string // Kubernetes major version
+	Minor   string // Kubernetes minor version
+}
+
+// String implements fmt.Stringer
+func (kv *KubeVersion) String() string { return kv.Version }
+
+// GitVersion returns the Kubernetes version string.
+//
+// Deprecated: use KubeVersion.Version.
+func (kv *KubeVersion) GitVersion() string { return kv.Version }
+
+// ParseKubeVersion parses kubernetes version from string
+func ParseKubeVersion(version string) (*KubeVersion, error) {
+	sv, err := semver.NewVersion(version)
+	if err != nil {
+		return nil, err
+	}
+	return &KubeVersion{
+		Version: "v" + sv.String(),
+		Major:   strconv.FormatUint(sv.Major(), 10),
+		Minor:   strconv.FormatUint(sv.Minor(), 10),
+	}, nil
+}
+
+// VersionSet is a set of Kubernetes API versions.
+type VersionSet []string
+
+// Has returns true if the version string is in the set.
+//
+// vs.Has("apps/v1")
+func (v VersionSet) Has(apiVersion string) bool {
+	for _, x := range v {
+		if x == apiVersion {
+			return true
+		}
+	}
+	return false
+}
+
+func allKnownVersions() VersionSet {
+	// We should register the built in extension APIs as well so CRDs are
+	// supported in the default version set. This has caused problems with `helm
+	// template` in the past, so let's be safe
+	apiextensionsv1beta1.AddToScheme(scheme.Scheme)
+	apiextensionsv1.AddToScheme(scheme.Scheme)
+
+	groups := scheme.Scheme.PrioritizedVersionsAllGroups()
+	vs := make(VersionSet, 0, len(groups))
+	for _, gv := range groups {
+		vs = append(vs, gv.String())
+	}
+	return vs
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go
new file mode 100644
index 000000000000..4f537a6e76dc
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go
@@ -0,0 +1,92 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/yaml"
+
+	"helm.sh/helm/v3/pkg/chart"
+)
+
+// LoadChartfile loads a Chart.yaml file into a *chart.Metadata.
+func LoadChartfile(filename string) (*chart.Metadata, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	y := new(chart.Metadata)
+	err = yaml.Unmarshal(b, y)
+	return y, err
+}
+
+// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
+//
+// 'filename' should be the complete path and filename ('foo/Chart.yaml')
+func SaveChartfile(filename string, cf *chart.Metadata) error {
+	// Pull out the dependencies of a v1 Chart, since there's no way
+	// to tell the serializer to skip a field for just this use case
+	savedDependencies := cf.Dependencies
+	if cf.APIVersion == chart.APIVersionV1 {
+		cf.Dependencies = nil
+	}
+	out, err := yaml.Marshal(cf)
+	if cf.APIVersion == chart.APIVersionV1 {
+		cf.Dependencies = savedDependencies
+	}
+	if err != nil {
+		return err
+	}
+	return os.WriteFile(filename, out, 0644)
+}
+
+// IsChartDir validates a chart directory.
+//
+// Checks for a valid Chart.yaml.
+func IsChartDir(dirName string) (bool, error) {
+	if fi, err := os.Stat(dirName); err != nil {
+		return false, err
+	} else if !fi.IsDir() {
+		return false, errors.Errorf("%q is not a directory", dirName)
+	}
+
+	chartYaml := filepath.Join(dirName, ChartfileName)
+	if _, err := os.Stat(chartYaml); os.IsNotExist(err) {
+		return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+	}
+
+	chartYamlContent, err := os.ReadFile(chartYaml)
+	if err != nil {
+		return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+	}
+
+	chartContent := new(chart.Metadata)
+	if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil {
+		return false, err
+	}
+	if chartContent == nil {
+		return false, errors.Errorf("chart metadata (%s) missing", ChartfileName)
+	}
+	if chartContent.Name == "" {
+		return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+	}
+
+	return true, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go
new file mode 100644
index 000000000000..40bce2a68e46
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go
@@ -0,0 +1,305 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package chartutil + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +func concatPrefix(a, b string) string { + if a == "" { + return b + } + return fmt.Sprintf("%s.%s", a, b) +} + +// CoalesceValues coalesces all of the values in a chart (and its subcharts). +// +// Values are coalesced together using the following rules: +// +// - Values in a higher level chart always override values in a lower-level +// dependency chart +// - Scalar values and arrays are replaced, maps are merged +// - A chart has access to all of the variables for it, as well as all of +// the values destined for its dependencies. +func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { + valsCopy, err := copyValues(vals) + if err != nil { + return vals, err + } + return coalesce(log.Printf, chrt, valsCopy, "", false) +} + +// MergeValues is used to merge the values in a chart and its subcharts. This +// is different from Coalescing as nil/null values are preserved. +// +// Values are coalesced together using the following rules: +// +// - Values in a higher level chart always override values in a lower-level +// dependency chart +// - Scalar values and arrays are replaced, maps are merged +// - A chart has access to all of the variables for it, as well as all of +// the values destined for its dependencies. +// +// Retaining Nils is useful when processes early in a Helm action or business +// logic need to retain them for when Coalescing will happen again later in the +// business logic. +func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { + valsCopy, err := copyValues(vals) + if err != nil { + return vals, err + } + return coalesce(log.Printf, chrt, valsCopy, "", true) +} + +func copyValues(vals map[string]interface{}) (Values, error) { + v, err := copystructure.Copy(vals) + if err != nil { + return vals, err + } + + valsCopy := v.(map[string]interface{}) + // if we have an empty map, make sure it is initialized + if valsCopy == nil { + valsCopy = make(map[string]interface{}) + } + + return valsCopy, nil +} + +type printFn func(format string, v ...interface{}) + +// coalesce coalesces the dest values and the chart values, giving priority to the dest values. +// +// This is a helper function for CoalesceValues and MergeValues. +// +// Note, the merge argument specifies whether this is being used by MergeValues +// or CoalesceValues. Coalescing removes null values and their keys in some +// situations while merging keeps the null values. +func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { + coalesceValues(printf, ch, dest, prefix, merge) + return coalesceDeps(printf, ch, dest, prefix, merge) +} + +// coalesceDeps coalesces the dependencies of the given chart. +func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) { + for _, subchart := range chrt.Dependencies() { + if c, ok := dest[subchart.Name()]; !ok { + // If dest doesn't already have the key, create it. 
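+			// (Illustrative example, not from the original source: with parent
+			// values {"sub": {"a": 1}} and subchart "sub" defaults
+			// {"a": 9, "b": 2}, the table passed down to "sub" coalesces to
+			// {"a": 1, "b": 2}.)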
+			dest[subchart.Name()] = make(map[string]interface{})
+		} else if !istable(c) {
+			return dest, errors.Errorf("type mismatch on %s: %T", subchart.Name(), c)
+		}
+		if dv, ok := dest[subchart.Name()]; ok {
+			dvmap := dv.(map[string]interface{})
+			subPrefix := concatPrefix(prefix, chrt.Metadata.Name)
+			// Get globals out of dest and merge them into dvmap.
+			coalesceGlobals(printf, dvmap, dest, subPrefix, merge)
+			// Now coalesce the rest of the values.
+			var err error
+			dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge)
+			if err != nil {
+				return dest, err
+			}
+		}
+	}
+	return dest, nil
+}
+
+// coalesceGlobals copies the globals out of src and merges them into dest.
+//
+// The dest map is modified in place.
+func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) {
+	var dg, sg map[string]interface{}
+
+	if destglob, ok := dest[GlobalKey]; !ok {
+		dg = make(map[string]interface{})
+	} else if dg, ok = destglob.(map[string]interface{}); !ok {
+		printf("warning: skipping globals because destination %s is not a table.", GlobalKey)
+		return
+	}
+
+	if srcglob, ok := src[GlobalKey]; !ok {
+		sg = make(map[string]interface{})
+	} else if sg, ok = srcglob.(map[string]interface{}); !ok {
+		printf("warning: skipping globals because source %s is not a table.", GlobalKey)
+		return
+	}
+
+	// EXPERIMENTAL: In the past, we have disallowed globals to test tables. This
+	// reverses that decision. It may somehow be possible to introduce a loop
+	// here, but I haven't found a way. So for the time being, let's allow
+	// tables in globals.
+	for key, val := range sg {
+		if istable(val) {
+			vv := copyMap(val.(map[string]interface{}))
+			if destv, ok := dg[key]; !ok {
+				// Here there is no merge. We're just adding.
+				dg[key] = vv
+			} else {
+				if destvmap, ok := destv.(map[string]interface{}); !ok {
+					printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key)
+				} else {
+					// Basically, we reverse order of coalesce here to merge
+					// top-down.
+					subPrefix := concatPrefix(prefix, key)
+					// In this location coalesceTablesFullKey should always have
+					// merge set to true. The output of coalesceGlobals is run
+					// through coalesce where any nils will be removed.
+					coalesceTablesFullKey(printf, vv, destvmap, subPrefix, true)
+					dg[key] = vv
+				}
+			}
+		} else if dv, ok := dg[key]; ok && istable(dv) {
+			// It's not clear if this condition can actually ever trigger.
+			printf("key %s is table. Skipping", key)
+		} else {
+			// TODO: Do we need to do any additional checking on the value?
+			dg[key] = val
+		}
+	}
+	dest[GlobalKey] = dg
+}
+
+func copyMap(src map[string]interface{}) map[string]interface{} {
+	m := make(map[string]interface{}, len(src))
+	for k, v := range src {
+		m[k] = v
+	}
+	return m
+}
+
+// coalesceValues builds up a values map for a particular chart.
+//
+// Values in v will override the values in the chart.
+func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string, merge bool) {
+	subPrefix := concatPrefix(prefix, c.Metadata.Name)
+
+	// Using c.Values directly when coalescing a table can cause problems where
+	// the original c.Values is altered. Creating a deep copy stops the problem.
+	// This section is fault-tolerant as there is no ability to return an error.
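+	// (copystructure.Copy performs a recursive deep copy, so coalescing the
+	// result cannot mutate the chart's stored defaults in c.Values.)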
+ valuesCopy, err := copystructure.Copy(c.Values) + var vc map[string]interface{} + var ok bool + if err != nil { + // If there is an error something is wrong with copying c.Values it + // means there is a problem in the deep copying package or something + // wrong with c.Values. In this case we will use c.Values and report + // an error. + printf("warning: unable to copy values, err: %s", err) + vc = c.Values + } else { + vc, ok = valuesCopy.(map[string]interface{}) + if !ok { + // c.Values has a map[string]interface{} structure. If the copy of + // it cannot be treated as map[string]interface{} there is something + // strangely wrong. Log it and use c.Values + printf("warning: unable to convert values copy to values type") + vc = c.Values + } + } + + for key, val := range vc { + if value, ok := v[key]; ok { + if value == nil && !merge { + // When the YAML value is null and we are coalescing instead of + // merging, we remove the value's key. + // This allows Helm's various sources of values (value files or --set) to + // remove incompatible keys from any previous chart, file, or set values. + delete(v, key) + } else if dest, ok := value.(map[string]interface{}); ok { + // if v[key] is a table, merge nv's val table into v[key]. + src, ok := val.(map[string]interface{}) + if !ok { + // If the original value is nil, there is nothing to coalesce, so we don't print + // the warning + if val != nil { + printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) + } + } else { + // If the key is a child chart, coalesce tables with Merge set to true + merge := childChartMergeTrue(c, key, merge) + + // Because v has higher precedence than nv, dest values override src + // values. + coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) + } + } + } else { + // If the key is not in v, copy it from nv. + v[key] = val + } + } +} + +func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool { + for _, subchart := range chrt.Dependencies() { + if subchart.Name() == key { + return true + } + } + return merge +} + +// CoalesceTables merges a source map into a destination map. +// +// dest is considered authoritative. +func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { + return coalesceTablesFullKey(log.Printf, dst, src, "", false) +} + +func MergeTables(dst, src map[string]interface{}) map[string]interface{} { + return coalesceTablesFullKey(log.Printf, dst, src, "", true) +} + +// coalesceTablesFullKey merges a source map into a destination map. +// +// dest is considered authoritative. +func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string, merge bool) map[string]interface{} { + // When --reuse-values is set but there are no modifications yet, return new values + if src == nil { + return dst + } + if dst == nil { + return src + } + // Because dest has higher precedence than src, dest values override src + // values. + for key, val := range src { + fullkey := concatPrefix(prefix, key) + if dv, ok := dst[key]; ok && !merge && dv == nil { + delete(dst, key) + } else if !ok { + dst[key] = val + } else if istable(val) { + if istable(dv) { + coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge) + } else { + printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) + } + } else if istable(dv) && val != nil { + printf("warning: destination for %s is a table. 
Ignoring non-table value (%v)", fullkey, val) + } + } + return dst +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go new file mode 100644 index 000000000000..f4656c913572 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go @@ -0,0 +1,34 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import "github.com/Masterminds/semver/v3" + +// IsCompatibleRange compares a version to a constraint. +// It returns true if the version matches the constraint, and false in all other cases. +func IsCompatibleRange(constraint, ver string) bool { + sv, err := semver.NewVersion(ver) + if err != nil { + return false + } + + c, err := semver.NewConstraint(constraint) + if err != nil { + return false + } + return c.Check(sv) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go new file mode 100644 index 000000000000..321d3d2c047b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -0,0 +1,735 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// chartName is a regular expression for testing the supplied name of a chart. +// This regular expression is probably stricter than it needs to be. We can relax it +// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be +// problematic. +var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$") + +const ( + // ChartfileName is the default Chart file name. + ChartfileName = "Chart.yaml" + // ValuesfileName is the default values file name. + ValuesfileName = "values.yaml" + // SchemafileName is the default values schema file name. + SchemafileName = "values.schema.json" + // TemplatesDir is the relative directory name for templates. + TemplatesDir = "templates" + // ChartsDir is the relative directory name for charts dependencies. + ChartsDir = "charts" + // TemplatesTestsDir is the relative directory name for tests. + TemplatesTestsDir = TemplatesDir + sep + "tests" + // IgnorefileName is the name of the Helm ignore file. + IgnorefileName = ".helmignore" + // IngressFileName is the name of the example ingress file. 
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
new file mode 100644
index 000000000000..321d3d2c047b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go
@@ -0,0 +1,735 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/yaml"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// chartName is a regular expression for testing the supplied name of a chart.
+// This regular expression is probably stricter than it needs to be and may be
+// relaxed somewhat. Newline characters, as well as $, quotes, +, parens, and %
+// are known to be problematic.
+var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$")
+
+const (
+	// ChartfileName is the default Chart file name.
+	ChartfileName = "Chart.yaml"
+	// ValuesfileName is the default values file name.
+	ValuesfileName = "values.yaml"
+	// SchemafileName is the default values schema file name.
+	SchemafileName = "values.schema.json"
+	// TemplatesDir is the relative directory name for templates.
+	TemplatesDir = "templates"
+	// ChartsDir is the relative directory name for charts dependencies.
+	ChartsDir = "charts"
+	// TemplatesTestsDir is the relative directory name for tests.
+	TemplatesTestsDir = TemplatesDir + sep + "tests"
+	// IgnorefileName is the name of the Helm ignore file.
+	IgnorefileName = ".helmignore"
+	// IngressFileName is the name of the example ingress file.
+	IngressFileName = TemplatesDir + sep + "ingress.yaml"
+	// DeploymentName is the name of the example deployment file.
+	DeploymentName = TemplatesDir + sep + "deployment.yaml"
+	// ServiceName is the name of the example service file.
+	ServiceName = TemplatesDir + sep + "service.yaml"
+	// ServiceAccountName is the name of the example serviceaccount file.
+	ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml"
+	// HorizontalPodAutoscalerName is the name of the example hpa file.
+	HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml"
+	// NotesName is the name of the example NOTES.txt file.
+	NotesName = TemplatesDir + sep + "NOTES.txt"
+	// HelpersName is the name of the example helpers file.
+	HelpersName = TemplatesDir + sep + "_helpers.tpl"
+	// TestConnectionName is the name of the example test file.
+	TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml"
+)
+
+// maxChartNameLength is lower than the limits we know of with certain file systems,
+// and with certain Kubernetes fields.
+const maxChartNameLength = 250
+
+const sep = string(filepath.Separator)
+
+const defaultChartfile = `apiVersion: v2
+name: %s
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+`
+
+const defaultValues = `# Default values for %s.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# This sets the replica count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+replicaCount: 1
+
+# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
+image:
+  repository: nginx
+  # This sets the pull policy for images.
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+
+# These are the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# This is to override the chart name.
+nameOverride: ""
+fullnameOverride: ""
+
+# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Automatically mount a ServiceAccount's API credentials?
+  automount: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+# This is for setting Kubernetes annotations on a Pod.
+# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+# This is for setting Kubernetes labels on a Pod.
+# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+podSecurityContext: {}
+  # fsGroup: 2000
+
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+# This sets up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+  # This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+  type: ClusterIP
+  # This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
+  port: 80
+
+# This block sets up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# This sets up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+livenessProbe:
+  httpGet:
+    path: /
+    port: http
+readinessProbe:
+  httpGet:
+    path: /
+    port: http
+
+# This section sets up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+# Additional volumes on the output Deployment definition.
+volumes: []
+# - name: foo
+#   secret:
+#     secretName: mysecret
+#     optional: false
+
+# Additional volumeMounts on the output Deployment definition.
+volumeMounts: []
+# - name: foo
+#   mountPath: "/etc/foo"
+#   readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- with .Values.ingress.className }}
+  ingressClassName: {{ . }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- with .pathType }}
+            pathType: {{ . }}
+            {{- end }}
+            backend:
+              service:
+                name: {{ include "<CHARTNAME>.fullname" $ }}
+                port:
+                  number: {{ $.Values.service.port }}
+          {{- end }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "<CHARTNAME>.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "<CHARTNAME>.labels" . | nindent 8 }}
+        {{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "<CHARTNAME>.serviceAccountName" . }}
+      {{- with .Values.podSecurityContext }}
+      securityContext:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}
+          {{- with .Values.securityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          {{- with .Values.livenessProbe }}
+          livenessProbe:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.readinessProbe }}
+          readinessProbe:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.volumeMounts }}
+          volumeMounts:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+      {{- with .Values.volumes }}
+      volumes:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "<CHARTNAME>.selectorLabels" . | nindent 4 }}
+`
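The templates above all reference helpers through the literal `<CHARTNAME>` placeholder; `transform`, defined near the end of this file, substitutes the real chart name when the chart is scaffolded. A minimal sketch of that substitution, using `mychart` as a made-up name:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	tpl := `{{ include "<CHARTNAME>.fullname" . }}`
	// Mirrors chartutil's transform(): a plain string replacement of the placeholder.
	out := strings.ReplaceAll(tpl, "<CHARTNAME>", "mychart")
	fmt.Println(out) // {{ include "mychart.fullname" . }}
}
```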
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "<CHARTNAME>.serviceAccountName" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "<CHARTNAME>.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "<CHARTNAME>.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "<CHARTNAME>.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "<CHARTNAME>.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "<CHARTNAME>.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define "<CHARTNAME>.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If the release name contains the chart name, it will be used as the full name.
+*/}}
+{{- define "<CHARTNAME>.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "<CHARTNAME>.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "<CHARTNAME>.labels" -}}
+helm.sh/chart: {{ include "<CHARTNAME>.chart" . }}
+{{ include "<CHARTNAME>.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "<CHARTNAME>.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "<CHARTNAME>.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "<CHARTNAME>.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "<CHARTNAME>.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "<CHARTNAME>.fullname" . }}-test-connection"
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "<CHARTNAME>.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+	schart, err := loader.Load(src)
+	if err != nil {
+		return errors.Wrapf(err, "could not load %s", src)
+	}
+
+	schart.Metadata = chartfile
+
+	var updatedTemplates []*chart.File
+
+	for _, template := range schart.Templates {
+		newData := transform(string(template.Data), schart.Name())
+		updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData})
+	}
+
+	schart.Templates = updatedTemplates
+	b, err := yaml.Marshal(schart.Values)
+	if err != nil {
+		return errors.Wrap(err, "reading values file")
+	}
+
+	var m map[string]interface{}
+	if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+		return errors.Wrap(err, "transforming values file")
+	}
+	schart.Values = m
+
+	// SaveDir looks for the file values.yaml when saving rather than the values
+	// key in order to preserve the comments in the YAML. The name placeholder
+	// needs to be replaced on that file.
+	for _, f := range schart.Raw {
+		if f.Name == ValuesfileName {
+			f.Data = transform(string(f.Data), schart.Name())
+		}
+	}
+
+	return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+	// Sanity-check the name of the chart so the user doesn't create one that causes problems.
+	if err := validateChartName(name); err != nil {
+		return "", err
+	}
+
+	path, err := filepath.Abs(dir)
+	if err != nil {
+		return path, err
+	}
+
+	if fi, err := os.Stat(path); err != nil {
+		return path, err
+	} else if !fi.IsDir() {
+		return path, errors.Errorf("no such directory %s", path)
+	}
+
+	cdir := filepath.Join(path, name)
+	if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+		return cdir, errors.Errorf("file %s already exists and is not a directory", cdir)
+	}
+
+	// Note: If you add a new template below (i.e., to `helm create`) that is disabled by
+	// default (like hpa and ingress), or disable an existing template by default, add the
+	// enabling condition to `TestHelmCreateChart_CheckDeprecatedWarnings` in
+	// `pkg/lint/lint_test.go` so it still runs through the deprecation checks with the
+	// latest Kubernetes version.
+	files := []struct {
+		path    string
+		content []byte
+	}{
+		{
+			// Chart.yaml
+			path:    filepath.Join(cdir, ChartfileName),
+			content: []byte(fmt.Sprintf(defaultChartfile, name)),
+		},
+		{
+			// values.yaml
+			path:    filepath.Join(cdir, ValuesfileName),
+			content: []byte(fmt.Sprintf(defaultValues, name)),
+		},
+		{
+			// .helmignore
+			path:    filepath.Join(cdir, IgnorefileName),
+			content: []byte(defaultIgnore),
+		},
+		{
+			// ingress.yaml
+			path:    filepath.Join(cdir, IngressFileName),
+			content: transform(defaultIngress, name),
+		},
+		{
+			// deployment.yaml
+			path:    filepath.Join(cdir, DeploymentName),
+			content: transform(defaultDeployment, name),
+		},
+		{
+			// service.yaml
+			path:    filepath.Join(cdir, ServiceName),
+			content: transform(defaultService, name),
+		},
+		{
+			// serviceaccount.yaml
+			path:    filepath.Join(cdir, ServiceAccountName),
+			content: transform(defaultServiceAccount, name),
+		},
+		{
+			// hpa.yaml
+			path:    filepath.Join(cdir, HorizontalPodAutoscalerName),
+			content: transform(defaultHorizontalPodAutoscaler, name),
+		},
+		{
+			// NOTES.txt
+			path:    filepath.Join(cdir, NotesName),
+			content: transform(defaultNotes, name),
+		},
+		{
+			// _helpers.tpl
+			path:    filepath.Join(cdir, HelpersName),
+			content: transform(defaultHelpers, name),
+		},
+		{
+			// test-connection.yaml
+			path:    filepath.Join(cdir, TestConnectionName),
+			content: transform(defaultTestConnection, name),
+		},
+	}
+
+	for _, file := range files {
+		if _, err := os.Stat(file.path); err == nil {
+			// There is no handle to a preferred output stream here.
+			fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+		}
+		if err := writeFile(file.path, file.content); err != nil {
+			return cdir, err
+		}
+	}
+	// Need to add the ChartsDir explicitly as it does not contain any file OOTB
+	if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+		return cdir, err
+	}
+	return cdir, nil
+}
+
+// transform replaces every occurrence of the <CHARTNAME> placeholder in src
+// with the given replacement string.
+func transform(src, replacement string) []byte {
+	return []byte(strings.ReplaceAll(src, "<CHARTNAME>", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+	if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+		return err
+	}
+	return os.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+	if name == "" || len(name) > maxChartNameLength {
+		return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+	}
+	if !chartName.MatchString(name) {
+		return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+	}
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
new file mode 100644
index 000000000000..36a34192728d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
@@ -0,0 +1,356 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+	"log"
+	"strings"
+
+	"github.com/mitchellh/copystructure"
+
+	"helm.sh/helm/v3/pkg/chart"
+)
+
+// ProcessDependencies checks through this chart's dependencies, processing accordingly.
+//
+// TODO: For Helm v4 this can be combined with or turned into ProcessDependenciesWithMerge
+func ProcessDependencies(c *chart.Chart, v Values) error {
+	if err := processDependencyEnabled(c, v, ""); err != nil {
+		return err
+	}
+	return processDependencyImportValues(c, false)
+}
+
+// ProcessDependenciesWithMerge checks through this chart's dependencies, processing accordingly.
+// It is similar to ProcessDependencies but it does not remove nil values during
+// the import/export handling process.
+func ProcessDependenciesWithMerge(c *chart.Chart, v Values) error { + if err := processDependencyEnabled(c, v, ""); err != nil { + return err + } + return processDependencyImportValues(c, true) +} + +// processDependencyConditions disables charts based on condition path value in values +func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { + if reqs == nil { + return + } + for _, r := range reqs { + for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") { + if len(c) > 0 { + // retrieve value + vv, err := cvals.PathValue(cpath + c) + if err == nil { + // if not bool, warn + if bv, ok := vv.(bool); ok { + r.Enabled = bv + break + } + log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) + } else if _, ok := err.(ErrNoValue); !ok { + // this is a real error + log.Printf("Warning: PathValue returned error %v", err) + } + } + } + } +} + +// processDependencyTags disables charts based on tags in values +func processDependencyTags(reqs []*chart.Dependency, cvals Values) { + if reqs == nil { + return + } + vt, err := cvals.Table("tags") + if err != nil { + return + } + for _, r := range reqs { + var hasTrue, hasFalse bool + for _, k := range r.Tags { + if b, ok := vt[k]; ok { + // if not bool, warn + if bv, ok := b.(bool); ok { + if bv { + hasTrue = true + } else { + hasFalse = true + } + } else { + log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) + } + } + } + if !hasTrue && hasFalse { + r.Enabled = false + } else if hasTrue || !hasTrue && !hasFalse { + r.Enabled = true + } + } +} + +func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart { + for _, c := range charts { + if c == nil { + continue + } + if c.Name() != dep.Name { + continue + } + if !IsCompatibleRange(dep.Version, c.Metadata.Version) { + continue + } + + out := *c + md := *c.Metadata + out.Metadata = &md + + if dep.Alias != "" { + md.Name = dep.Alias + } + return &out + } + return nil +} + +// processDependencyEnabled removes disabled charts from dependencies +func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { + if c.Metadata.Dependencies == nil { + return nil + } + + var chartDependencies []*chart.Chart + // If any dependency is not a part of Chart.yaml + // then this should be added to chartDependencies. + // However, if the dependency is already specified in Chart.yaml + // we should not add it, as it would be processed from Chart.yaml anyway. + +Loop: + for _, existing := range c.Dependencies() { + for _, req := range c.Metadata.Dependencies { + if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) { + continue Loop + } + } + chartDependencies = append(chartDependencies, existing) + } + + for _, req := range c.Metadata.Dependencies { + if req == nil { + continue + } + if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil { + chartDependencies = append(chartDependencies, chartDependency) + } + if req.Alias != "" { + req.Name = req.Alias + } + } + c.SetDependencies(chartDependencies...) 
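+
+	// Illustration (hypothetical parent values.yaml driving the passes below;
+	// the tag and condition names are invented for this sketch):
+	//
+	//	tags:
+	//	  backend: false        # any dependency tagged "backend" is disabled
+	//	subchart:
+	//	  enabled: true         # matched by a condition "subchart.enabled"
+	//
+	// Tags are evaluated first, then conditions; a condition path that
+	// resolves to a bool overrides whatever the tags decided.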
+ + // set all to true + for _, lr := range c.Metadata.Dependencies { + lr.Enabled = true + } + cvals, err := CoalesceValues(c, v) + if err != nil { + return err + } + // flag dependencies as enabled/disabled + processDependencyTags(c.Metadata.Dependencies, cvals) + processDependencyConditions(c.Metadata.Dependencies, cvals, path) + // make a map of charts to remove + rm := map[string]struct{}{} + for _, r := range c.Metadata.Dependencies { + if !r.Enabled { + // remove disabled chart + rm[r.Name] = struct{}{} + } + } + // don't keep disabled charts in new slice + cd := []*chart.Chart{} + copy(cd, c.Dependencies()[:0]) + for _, n := range c.Dependencies() { + if _, ok := rm[n.Metadata.Name]; !ok { + cd = append(cd, n) + } + } + // don't keep disabled charts in metadata + cdMetadata := []*chart.Dependency{} + copy(cdMetadata, c.Metadata.Dependencies[:0]) + for _, n := range c.Metadata.Dependencies { + if _, ok := rm[n.Name]; !ok { + cdMetadata = append(cdMetadata, n) + } + } + + // recursively call self to process sub dependencies + for _, t := range cd { + subpath := path + t.Metadata.Name + "." + if err := processDependencyEnabled(t, cvals, subpath); err != nil { + return err + } + } + // set the correct dependencies in metadata + c.Metadata.Dependencies = nil + c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...) + c.SetDependencies(cd...) + + return nil +} + +// pathToMap creates a nested map given a YAML path in dot notation. +func pathToMap(path string, data map[string]interface{}) map[string]interface{} { + if path == "." { + return data + } + return set(parsePath(path), data) +} + +func set(path []string, data map[string]interface{}) map[string]interface{} { + if len(path) == 0 { + return nil + } + cur := data + for i := len(path) - 1; i >= 0; i-- { + cur = map[string]interface{}{path[i]: cur} + } + return cur +} + +// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field. +func processImportValues(c *chart.Chart, merge bool) error { + if c.Metadata.Dependencies == nil { + return nil + } + // combine chart values and empty config to get Values + var cvals Values + var err error + if merge { + cvals, err = MergeValues(c, nil) + } else { + cvals, err = CoalesceValues(c, nil) + } + if err != nil { + return err + } + b := make(map[string]interface{}) + // import values from each dependency if specified in import-values + for _, r := range c.Metadata.Dependencies { + var outiv []interface{} + for _, riv := range r.ImportValues { + switch iv := riv.(type) { + case map[string]interface{}: + child := iv["child"].(string) + parent := iv["parent"].(string) + + outiv = append(outiv, map[string]string{ + "child": child, + "parent": parent, + }) + + // get child table + vv, err := cvals.Table(r.Name + "." + child) + if err != nil { + log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err) + continue + } + // create value map from child to be merged into parent + if merge { + b = MergeTables(b, pathToMap(parent, vv.AsMap())) + } else { + b = CoalesceTables(b, pathToMap(parent, vv.AsMap())) + } + case string: + child := "exports." + iv + outiv = append(outiv, map[string]string{ + "child": child, + "parent": ".", + }) + vm, err := cvals.Table(r.Name + "." 
+ child) + if err != nil { + log.Printf("Warning: ImportValues missing table: %v", err) + continue + } + if merge { + b = MergeTables(b, vm.AsMap()) + } else { + b = CoalesceTables(b, vm.AsMap()) + } + } + } + r.ImportValues = outiv + } + + // Imported values from a child to a parent chart have a lower priority than + // the parents values. This enables parent charts to import a large section + // from a child and then override select parts. This is why b is merged into + // cvals in the code below and not the other way around. + if merge { + // deep copying the cvals as there are cases where pointers can end + // up in the cvals when they are copied onto b in ways that break things. + cvals = deepCopyMap(cvals) + c.Values = MergeTables(cvals, b) + } else { + // Trimming the nil values from cvals is needed for backwards compatibility. + // Previously, the b value had been populated with cvals along with some + // overrides. This caused the coalescing functionality to remove the + // nil/null values. This trimming is for backwards compat. + cvals = trimNilValues(cvals) + c.Values = CoalesceTables(cvals, b) + } + + return nil +} + +func deepCopyMap(vals map[string]interface{}) map[string]interface{} { + valsCopy, err := copystructure.Copy(vals) + if err != nil { + return vals + } + return valsCopy.(map[string]interface{}) +} + +func trimNilValues(vals map[string]interface{}) map[string]interface{} { + valsCopy, err := copystructure.Copy(vals) + if err != nil { + return vals + } + valsCopyMap := valsCopy.(map[string]interface{}) + for key, val := range valsCopyMap { + if val == nil { + // Iterate over the values and remove nil keys + delete(valsCopyMap, key) + } else if istable(val) { + // Recursively call into ourselves to remove keys from inner tables + valsCopyMap[key] = trimNilValues(val.(map[string]interface{})) + } + } + + return valsCopyMap +} + +// processDependencyImportValues imports specified chart values from child to parent. +func processDependencyImportValues(c *chart.Chart, merge bool) error { + for _, d := range c.Dependencies() { + // recurse + if err := processDependencyImportValues(d, merge); err != nil { + return err + } + } + return processImportValues(c, merge) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go new file mode 100644 index 000000000000..49c55ac5216b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go @@ -0,0 +1,45 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package chartutil contains tools for working with charts. + +Charts are described in the chart package (pkg/chart). +This package provides utilities for serializing and deserializing charts. + +A chart can be represented on the file system in one of two ways: + + - As a directory that contains a Chart.yaml file and other chart things. + - As a tarred gzipped file containing a directory that then contains a + Chart.yaml file. + +This package provides utilities for working with those file formats. 
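+
+For the reverse direction, this package also provides Save, which writes a
+chart out as a gzipped tar archive, and SaveDir, which writes it out as an
+expanded directory tree (both defined in save.go).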
+ +The preferred way of loading a chart is using 'loader.Load`: + + chart, err := loader.Load(filename) + +This will attempt to discover whether the file at 'filename' is a directory or +a chart archive. It will then load accordingly. + +For accepting raw compressed tar file data from an io.Reader, the +'loader.LoadArchive()' will read in the data, uncompress it, and unpack it +into a Chart. + +When creating charts in memory, use the 'helm.sh/helm/pkg/chart' +package directly. +*/ +package chartutil // import "helm.sh/helm/v3/pkg/chartutil" diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go new file mode 100644 index 000000000000..0a4046d2e2ec --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go @@ -0,0 +1,43 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" +) + +// ErrNoTable indicates that a chart does not have a matching table. +type ErrNoTable struct { + Key string +} + +func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) } + +// ErrNoValue indicates that Values does not contain a key with a value +type ErrNoValue struct { + Key string +} + +func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } + +type ErrInvalidChartName struct { + Name string +} + +func (e ErrInvalidChartName) Error() string { + return fmt.Sprintf("%q is not a valid chart name", e.Name) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go new file mode 100644 index 000000000000..ac59f257533b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "io" + "os" + "path/filepath" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// Expand uncompresses and extracts a chart into the specified directory. 
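+//
+// The chart name is taken from the archive's Chart.yaml, and every output
+// path is constructed with SecureJoin so that extracted files cannot escape
+// the target directory.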
+func Expand(dir string, r io.Reader) error { + files, err := loader.LoadArchiveFiles(r) + if err != nil { + return err + } + + // Get the name of the chart + var chartName string + for _, file := range files { + if file.Name == "Chart.yaml" { + ch := &chart.Metadata{} + if err := yaml.Unmarshal(file.Data, ch); err != nil { + return errors.Wrap(err, "cannot load Chart.yaml") + } + chartName = ch.Name + } + } + if chartName == "" { + return errors.New("chart name not specified") + } + + // Find the base directory + // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up + // being wrong or returning an error. This was introduced in v0.4.0. + dir = filepath.Clean(dir) + chartdir, err := securejoin.SecureJoin(dir, chartName) + if err != nil { + return err + } + + // Copy all files verbatim. We don't parse these files because parsing can remove + // comments. + for _, file := range files { + outpath, err := securejoin.SecureJoin(chartdir, file.Name) + if err != nil { + return err + } + + // Make sure the necessary subdirs get created. + basedir := filepath.Dir(outpath) + if err := os.MkdirAll(basedir, 0755); err != nil { + return err + } + + if err := os.WriteFile(outpath, file.Data, 0644); err != nil { + return err + } + } + + return nil +} + +// ExpandFile expands the src file into the dest directory. +func ExpandFile(dest, src string) error { + h, err := os.Open(src) + if err != nil { + return err + } + defer h.Close() + return Expand(dest, h) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go new file mode 100644 index 000000000000..7b9768fd3cc2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chartutil + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ValidateAgainstSchema checks that values does not violate the structure laid out in schema +func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { + var sb strings.Builder + if chrt.Schema != nil { + err := ValidateAgainstSingleSchema(values, chrt.Schema) + if err != nil { + sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) + sb.WriteString(err.Error()) + } + } + + // For each dependency, recursively call this function with the coalesced values + for _, subchart := range chrt.Dependencies() { + subchartValues := values[subchart.Name()].(map[string]interface{}) + if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { + sb.WriteString(err.Error()) + } + } + + if sb.Len() > 0 { + return errors.New(sb.String()) + } + + return nil +} + +// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema +func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { + defer func() { + if r := recover(); r != nil { + reterr = fmt.Errorf("unable to validate schema: %s", r) + } + }() + + valuesData, err := yaml.Marshal(values) + if err != nil { + return err + } + valuesJSON, err := yaml.YAMLToJSON(valuesData) + if err != nil { + return err + } + if bytes.Equal(valuesJSON, []byte("null")) { + valuesJSON = []byte("{}") + } + schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) + valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) + + result, err := gojsonschema.Validate(schemaLoader, valuesLoader) + if err != nil { + return err + } + + if !result.Valid() { + var sb strings.Builder + for _, desc := range result.Errors() { + sb.WriteString(fmt.Sprintf("- %s\n", desc)) + } + return errors.New(sb.String()) + } + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go new file mode 100644 index 000000000000..4ee90709c927 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go @@ -0,0 +1,264 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") + +// SaveDir saves a chart as files in a directory. +// +// This takes the chart name, and creates a new subdirectory inside of the given dest +// directory, writing the chart's contents to that subdirectory. 
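+//
+// Chart.yaml, values.yaml, the values schema, templates, and extra files are
+// written out as-is; each dependency is re-packaged into a .tgz archive under
+// the charts/ subdirectory via Save.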
+func SaveDir(c *chart.Chart, dest string) error { + // Create the chart directory + err := validateName(c.Name()) + if err != nil { + return err + } + outdir := filepath.Join(dest, c.Name()) + if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() { + return errors.Errorf("file %s already exists and is not a directory", outdir) + } + if err := os.MkdirAll(outdir, 0755); err != nil { + return err + } + + // Save the chart file. + if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil { + return err + } + + // Save values.yaml + for _, f := range c.Raw { + if f.Name == ValuesfileName { + vf := filepath.Join(outdir, ValuesfileName) + if err := writeFile(vf, f.Data); err != nil { + return err + } + } + } + + // Save values.schema.json if it exists + if c.Schema != nil { + filename := filepath.Join(outdir, SchemafileName) + if err := writeFile(filename, c.Schema); err != nil { + return err + } + } + + // Save templates and files + for _, o := range [][]*chart.File{c.Templates, c.Files} { + for _, f := range o { + n := filepath.Join(outdir, f.Name) + if err := writeFile(n, f.Data); err != nil { + return err + } + } + } + + // Save dependencies + base := filepath.Join(outdir, ChartsDir) + for _, dep := range c.Dependencies() { + // Here, we write each dependency as a tar file. + if _, err := Save(dep, base); err != nil { + return errors.Wrapf(err, "saving %s", dep.ChartFullPath()) + } + } + return nil +} + +// Save creates an archived chart to the given directory. +// +// This takes an existing chart and a destination directory. +// +// If the directory is /foo, and the chart is named bar, with version 1.0.0, this +// will generate /foo/bar-1.0.0.tgz. +// +// This returns the absolute path to the chart archive file. +func Save(c *chart.Chart, outDir string) (string, error) { + if err := c.Validate(); err != nil { + return "", errors.Wrap(err, "chart validation") + } + + filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version) + filename = filepath.Join(outDir, filename) + dir := filepath.Dir(filename) + if stat, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err2 := os.MkdirAll(dir, 0755); err2 != nil { + return "", err2 + } + } else { + return "", errors.Wrapf(err, "stat %s", dir) + } + } else if !stat.IsDir() { + return "", errors.Errorf("is not a directory: %s", dir) + } + + f, err := os.Create(filename) + if err != nil { + return "", err + } + + // Wrap in gzip writer + zipper := gzip.NewWriter(f) + zipper.Header.Extra = headerBytes + zipper.Header.Comment = "Helm" + + // Wrap in tar writer + twriter := tar.NewWriter(zipper) + rollback := false + defer func() { + twriter.Close() + zipper.Close() + f.Close() + if rollback { + os.Remove(filename) + } + }() + + if err := writeTarContents(twriter, c, ""); err != nil { + rollback = true + return filename, err + } + return filename, nil +} + +func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { + err := validateName(c.Name()) + if err != nil { + return err + } + base := filepath.Join(prefix, c.Name()) + + // Pull out the dependencies of a v1 Chart, since there's no way + // to tell the serializer to skip a field for just this use case + savedDependencies := c.Metadata.Dependencies + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = nil + } + // Save Chart.yaml + cdata, err := yaml.Marshal(c.Metadata) + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = savedDependencies + } + if err != nil { + return err + } + 
if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil {
+		return err
+	}
+
+	// Save Chart.lock
+	// TODO: remove the APIVersion check when APIVersionV1 is not used anymore
+	if c.Metadata.APIVersion == chart.APIVersionV2 {
+		if c.Lock != nil {
+			ldata, err := yaml.Marshal(c.Lock)
+			if err != nil {
+				return err
+			}
+			if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Save values.yaml
+	for _, f := range c.Raw {
+		if f.Name == ValuesfileName {
+			if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Save values.schema.json if it exists
+	if c.Schema != nil {
+		if !json.Valid(c.Schema) {
+			return errors.New("Invalid JSON in " + SchemafileName)
+		}
+		if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil {
+			return err
+		}
+	}
+
+	// Save templates
+	for _, f := range c.Templates {
+		n := filepath.Join(base, f.Name)
+		if err := writeToTar(out, n, f.Data); err != nil {
+			return err
+		}
+	}
+
+	// Save files
+	for _, f := range c.Files {
+		n := filepath.Join(base, f.Name)
+		if err := writeToTar(out, n, f.Data); err != nil {
+			return err
+		}
+	}
+
+	// Save dependencies
+	for _, dep := range c.Dependencies() {
+		if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// writeToTar writes a single file to a tar archive.
+func writeToTar(out *tar.Writer, name string, body []byte) error {
+	// TODO: Do we need to create dummy parent directory names if none exist?
+	h := &tar.Header{
+		Name:    filepath.ToSlash(name),
+		Mode:    0644,
+		Size:    int64(len(body)),
+		ModTime: time.Now(),
+	}
+	if err := out.WriteHeader(h); err != nil {
+		return err
+	}
+	_, err := out.Write(body)
+	return err
+}
+
+// validateName rejects names containing directory components: if the name
+// contains characters that would change the save location, it is invalid.
+func validateName(name string) error {
+	nname := filepath.Base(name)
+
+	if nname != name {
+		return ErrInvalidChartName{name}
+	}
+
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go
new file mode 100644
index 000000000000..05c090cb6695
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go
@@ -0,0 +1,112 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chartutil
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/pkg/errors"
+)
+
+// validName is a regular expression for resource names.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+var (
+	// errMissingName indicates that a release (name) was not provided.
+	errMissingName = errors.New("no name provided")
+
+	// errInvalidName indicates that an invalid release name was provided
+	errInvalidName = fmt.Errorf(
+		"invalid release name, must match regex %s and the length must not be longer than 53",
+		validName.String())
+
+	// errInvalidKubernetesName indicates that the name does not meet the Kubernetes
+	// restrictions on metadata names.
+	errInvalidKubernetesName = fmt.Errorf(
+		"invalid metadata name, must match regex %s and the length must not be longer than 253",
+		validName.String())
+)
+
+const (
+	// According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names)
+	// some resource names have a max length of 63 characters while others have a max
+	// length of 253 characters. As we cannot be sure which resources a chart will use,
+	// we limit release names to 53 characters: 63 minus 10 characters reserved for
+	// suffixes that chart maintainers commonly append when deriving resource names
+	// from the release name.
+	maxReleaseNameLen = 53
+	// maxMetadataNameLen is the maximum length Kubernetes allows for any name.
+	maxMetadataNameLen = 253
+)
+
+// ValidateReleaseName performs checks for an entry for a Helm release name
+//
+// For Helm to allow a name, it must be below a certain character count (53) and also match
+// a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+	// This case is preserved for backwards compatibility
+	if name == "" {
+		return errMissingName
+	}
+	if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+		return errInvalidName
+	}
+	return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4.
Name validation now uses rules defined in +// pkg/lint/rules.validateMetadataNameFunc() +func ValidateMetadataName(name string) error { + if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) { + return errInvalidKubernetesName + } + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go new file mode 100644 index 000000000000..566951ec5918 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go @@ -0,0 +1,225 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// GlobalKey is the name of the Values key that is used for storing global vars. +const GlobalKey = "global" + +// Values represents a collection of chart values. +type Values map[string]interface{} + +// YAML encodes the Values into a YAML string. +func (v Values) YAML() (string, error) { + b, err := yaml.Marshal(v) + return string(b), err +} + +// Table gets a table (YAML subsection) from a Values object. +// +// The table is returned as a Values. +// +// Compound table names may be specified with dots: +// +// foo.bar +// +// The above will be evaluated as "The table bar inside the table +// foo". +// +// An ErrNoTable is returned if the table does not exist. +func (v Values) Table(name string) (Values, error) { + table := v + var err error + + for _, n := range parsePath(name) { + if table, err = tableLookup(table, n); err != nil { + break + } + } + return table, err +} + +// AsMap is a utility function for converting Values to a map[string]interface{}. +// +// It protects against nil map panics. +func (v Values) AsMap() map[string]interface{} { + if len(v) == 0 { + return map[string]interface{}{} + } + return v +} + +// Encode writes serialized Values information to the given io.Writer. +func (v Values) Encode(w io.Writer) error { + out, err := yaml.Marshal(v) + if err != nil { + return err + } + _, err = w.Write(out) + return err +} + +func tableLookup(v Values, simple string) (Values, error) { + v2, ok := v[simple] + if !ok { + return v, ErrNoTable{simple} + } + if vv, ok := v2.(map[string]interface{}); ok { + return vv, nil + } + + // This catches a case where a value is of type Values, but doesn't (for some + // reason) match the map[string]interface{}. This has been observed in the + // wild, and might be a result of a nil map of type Values. + if vv, ok := v2.(Values); ok { + return vv, nil + } + + return Values{}, ErrNoTable{simple} +} + +// ReadValues will parse YAML byte data into a Values. +func ReadValues(data []byte) (vals Values, err error) { + err = yaml.Unmarshal(data, &vals, func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + }) + if len(vals) == 0 { + vals = Values{} + } + return vals, err +} + +// ReadValuesFile will parse a YAML file into a map of values. 
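+//
+// On a read error, it returns an empty (non-nil) Values map along with the
+// error from os.ReadFile.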
+func ReadValuesFile(filename string) (Values, error) {
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return map[string]interface{}{}, err
+	}
+	return ReadValues(data)
+}
+
+// ReleaseOptions represents the additional release options needed
+// for the composition of the final values struct
+type ReleaseOptions struct {
+	Name      string
+	Namespace string
+	Revision  int
+	IsUpgrade bool
+	IsInstall bool
+}
+
+// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) {
+	return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false)
+}
+
+// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValuesWithSchemaValidation(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities, skipSchemaValidation bool) (Values, error) {
+	if caps == nil {
+		caps = DefaultCapabilities
+	}
+	top := map[string]interface{}{
+		"Chart":        chrt.Metadata,
+		"Capabilities": caps,
+		"Release": map[string]interface{}{
+			"Name":      options.Name,
+			"Namespace": options.Namespace,
+			"IsUpgrade": options.IsUpgrade,
+			"IsInstall": options.IsInstall,
+			"Revision":  options.Revision,
+			"Service":   "Helm",
+		},
+	}
+
+	vals, err := CoalesceValues(chrt, chrtVals)
+	if err != nil {
+		return top, err
+	}
+
+	if !skipSchemaValidation {
+		if err := ValidateAgainstSchema(chrt, vals); err != nil {
+			errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s"
+			return top, fmt.Errorf(errFmt, err.Error())
+		}
+	}
+
+	top["Values"] = vals
+	return top, nil
+}
+
+// istable reports whether the given value matches the definition of a YAML
+// table, i.e. a map[string]interface{}.
+func istable(v interface{}) bool {
+	_, ok := v.(map[string]interface{})
+	return ok
+}
+
+// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path.
+// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods.
+// Given the following YAML data, the value at path "chapter.one.title" is "Loomings".
+//
+//	chapter:
+//	  one:
+//	    title: "Loomings"
+func (v Values) PathValue(path string) (interface{}, error) {
+	if path == "" {
+		return nil, errors.New("YAML path cannot be empty")
+	}
+	return v.pathValue(parsePath(path))
+}
+
+func (v Values) pathValue(path []string) (interface{}, error) {
+	if len(path) == 1 {
+		// if it exists, it must be a root key, not a table
+		if _, ok := v[path[0]]; ok && !istable(v[path[0]]) {
+			return v[path[0]], nil
+		}
+		return nil, ErrNoValue{path[0]}
+	}
+
+	key, path := path[len(path)-1], path[:len(path)-1]
+	// get our table for table path
+	t, err := v.Table(joinPath(path...))
+	if err != nil {
+		return nil, ErrNoValue{key}
+	}
+	// check table for key and ensure value is not a table
+	if k, ok := t[key]; ok && !istable(k) {
+		return k, nil
+	}
+	return nil, ErrNoValue{key}
+}
+
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
+func joinPath(path ...string) string { return strings.Join(path, ".") }
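Table and PathValue are the two dotted-path accessors on Values. A quick sketch of both against the YAML from the doc comment above (again assuming the vendored import path resolves):

```go
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chartutil"
)

func main() {
	vals, err := chartutil.ReadValues([]byte(`
chapter:
  one:
    title: "Loomings"
`))
	if err != nil {
		panic(err)
	}

	// Table returns the subtree at a dotted path...
	tbl, _ := vals.Table("chapter.one")
	fmt.Println(tbl) // map[title:Loomings]

	// ...while PathValue returns the scalar leaf.
	title, _ := vals.PathValue("chapter.one.title")
	fmt.Println(title) // Loomings
}
```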
+// +// chapter: +// one: +// title: "Loomings" +func (v Values) PathValue(path string) (interface{}, error) { + if path == "" { + return nil, errors.New("YAML path cannot be empty") + } + return v.pathValue(parsePath(path)) +} + +func (v Values) pathValue(path []string) (interface{}, error) { + if len(path) == 1 { + // if exists must be root key not table + if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { + return v[path[0]], nil + } + return nil, ErrNoValue{path[0]} + } + + key, path := path[len(path)-1], path[:len(path)-1] + // get our table for table path + t, err := v.Table(joinPath(path...)) + if err != nil { + return nil, ErrNoValue{key} + } + // check table for key and ensure value is not a table + if k, ok := t[key]; ok && !istable(k) { + return k, nil + } + return nil, ErrNoValue{key} +} + +func parsePath(key string) []string { return strings.Split(key, ".") } + +func joinPath(path ...string) string { return strings.Join(path, ".") } diff --git a/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/vendor/helm.sh/helm/v3/pkg/cli/environment.go new file mode 100644 index 000000000000..635806344268 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/cli/environment.go @@ -0,0 +1,267 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package cli describes the operating environment for the Helm CLI. + +Helm's environment encapsulates all of the service dependencies Helm has. +These dependencies are expressed as interfaces so that alternate implementations +(mocks, etc.) can be easily generated. +*/ +package cli + +import ( + "fmt" + "net/http" + "os" + "strconv" + "strings" + + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/internal/version" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/kube" +) + +// defaultMaxHistory sets the maximum number of releases to 0: unlimited +const defaultMaxHistory = 10 + +// defaultBurstLimit sets the default client-side throttling limit +const defaultBurstLimit = 100 + +// defaultQPS sets the default QPS value to 0 to use library defaults unless specified +const defaultQPS = float32(0) + +// EnvSettings describes all of the environment settings. +type EnvSettings struct { + namespace string + config *genericclioptions.ConfigFlags + + // KubeConfig is the path to the kubeconfig file + KubeConfig string + // KubeContext is the name of the kubeconfig context. + KubeContext string + // Bearer KubeToken used for authentication + KubeToken string + // Username to impersonate for the operation + KubeAsUser string + // Groups to impersonate for the operation, multiple groups parsed from a comma delimited list + KubeAsGroups []string + // Kubernetes API Server Endpoint for authentication + KubeAPIServer string + // Custom certificate authority file. + KubeCaFile string + // KubeInsecureSkipTLSVerify indicates if server's certificate will not be checked for validity. 
+ // This makes the HTTPS connections insecure + KubeInsecureSkipTLSVerify bool + // KubeTLSServerName overrides the name to use for server certificate validation. + // If it is not provided, the hostname used to contact the server is used + KubeTLSServerName string + // Debug indicates whether or not Helm is running in Debug mode. + Debug bool + // RegistryConfig is the path to the registry config file. + RegistryConfig string + // RepositoryConfig is the path to the repositories file. + RepositoryConfig string + // RepositoryCache is the path to the repository cache directory. + RepositoryCache string + // PluginsDirectory is the path to the plugins directory. + PluginsDirectory string + // MaxHistory is the max release history maintained. + MaxHistory int + // BurstLimit is the default client-side throttling limit. + BurstLimit int + // QPS is queries per second which may be used to avoid throttling. + QPS float32 +} + +func New() *EnvSettings { + env := &EnvSettings{ + namespace: os.Getenv("HELM_NAMESPACE"), + MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory), + KubeContext: os.Getenv("HELM_KUBECONTEXT"), + KubeToken: os.Getenv("HELM_KUBETOKEN"), + KubeAsUser: os.Getenv("HELM_KUBEASUSER"), + KubeAsGroups: envCSV("HELM_KUBEASGROUPS"), + KubeAPIServer: os.Getenv("HELM_KUBEAPISERVER"), + KubeCaFile: os.Getenv("HELM_KUBECAFILE"), + KubeTLSServerName: os.Getenv("HELM_KUBETLS_SERVER_NAME"), + KubeInsecureSkipTLSVerify: envBoolOr("HELM_KUBEINSECURE_SKIP_TLS_VERIFY", false), + PluginsDirectory: envOr("HELM_PLUGINS", helmpath.DataPath("plugins")), + RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")), + RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")), + RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")), + BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit), + QPS: envFloat32Or("HELM_QPS", defaultQPS), + } + env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG")) + + // bind to kubernetes config flags + config := &genericclioptions.ConfigFlags{ + Namespace: &env.namespace, + Context: &env.KubeContext, + BearerToken: &env.KubeToken, + APIServer: &env.KubeAPIServer, + CAFile: &env.KubeCaFile, + KubeConfig: &env.KubeConfig, + Impersonate: &env.KubeAsUser, + Insecure: &env.KubeInsecureSkipTLSVerify, + TLSServerName: &env.KubeTLSServerName, + ImpersonateGroup: &env.KubeAsGroups, + WrapConfigFn: func(config *rest.Config) *rest.Config { + config.Burst = env.BurstLimit + config.QPS = env.QPS + config.Wrap(func(rt http.RoundTripper) http.RoundTripper { + return &kube.RetryingRoundTripper{Wrapped: rt} + }) + config.UserAgent = version.GetUserAgent() + return config + }, + } + if env.BurstLimit != defaultBurstLimit { + config = config.WithDiscoveryBurst(env.BurstLimit) + } + env.config = config + + return env +} + +// AddFlags binds flags to the given flagset. 
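+//
+// A minimal wiring sketch (the flag-set name and argument slice are
+// illustrative):
+//
+//	settings := cli.New()
+//	fs := pflag.NewFlagSet("helm", pflag.ContinueOnError)
+//	settings.AddFlags(fs)
+//	_ = fs.Parse(os.Args[1:])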
+func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&s.namespace, "namespace", "n", s.namespace, "namespace scope for this request") + fs.StringVar(&s.KubeConfig, "kubeconfig", "", "path to the kubeconfig file") + fs.StringVar(&s.KubeContext, "kube-context", s.KubeContext, "name of the kubeconfig context to use") + fs.StringVar(&s.KubeToken, "kube-token", s.KubeToken, "bearer token used for authentication") + fs.StringVar(&s.KubeAsUser, "kube-as-user", s.KubeAsUser, "username to impersonate for the operation") + fs.StringArrayVar(&s.KubeAsGroups, "kube-as-group", s.KubeAsGroups, "group to impersonate for the operation, this flag can be repeated to specify multiple groups.") + fs.StringVar(&s.KubeAPIServer, "kube-apiserver", s.KubeAPIServer, "the address and the port for the Kubernetes API server") + fs.StringVar(&s.KubeCaFile, "kube-ca-file", s.KubeCaFile, "the certificate authority file for the Kubernetes API server connection") + fs.StringVar(&s.KubeTLSServerName, "kube-tls-server-name", s.KubeTLSServerName, "server name to use for Kubernetes API server certificate validation. If it is not provided, the hostname used to contact the server is used") + fs.BoolVar(&s.KubeInsecureSkipTLSVerify, "kube-insecure-skip-tls-verify", s.KubeInsecureSkipTLSVerify, "if true, the Kubernetes API server's certificate will not be checked for validity. This will make your HTTPS connections insecure") + fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output") + fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file") + fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs") + fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the directory containing cached repository indexes") + fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit") + fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting") +} + +func envOr(name, def string) string { + if v, ok := os.LookupEnv(name); ok { + return v + } + return def +} + +func envBoolOr(name string, def bool) bool { + if name == "" { + return def + } + envVal := envOr(name, strconv.FormatBool(def)) + ret, err := strconv.ParseBool(envVal) + if err != nil { + return def + } + return ret +} + +func envIntOr(name string, def int) int { + if name == "" { + return def + } + envVal := envOr(name, strconv.Itoa(def)) + ret, err := strconv.Atoi(envVal) + if err != nil { + return def + } + return ret +} + +func envFloat32Or(name string, def float32) float32 { + if name == "" { + return def + } + envVal := envOr(name, strconv.FormatFloat(float64(def), 'f', 2, 32)) + ret, err := strconv.ParseFloat(envVal, 32) + if err != nil { + return def + } + return float32(ret) +} + +func envCSV(name string) (ls []string) { + trimmed := strings.Trim(os.Getenv(name), ", ") + if trimmed != "" { + ls = strings.Split(trimmed, ",") + } + return +} + +func (s *EnvSettings) EnvVars() map[string]string { + envvars := map[string]string{ + "HELM_BIN": os.Args[0], + "HELM_CACHE_HOME": helmpath.CachePath(""), + "HELM_CONFIG_HOME": helmpath.ConfigPath(""), + "HELM_DATA_HOME": helmpath.DataPath(""), + "HELM_DEBUG": fmt.Sprint(s.Debug), + "HELM_PLUGINS": s.PluginsDirectory, + "HELM_REGISTRY_CONFIG": s.RegistryConfig, + "HELM_REPOSITORY_CACHE": s.RepositoryCache, + "HELM_REPOSITORY_CONFIG": s.RepositoryConfig, + "HELM_NAMESPACE": 
s.Namespace(), + "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory), + "HELM_BURST_LIMIT": strconv.Itoa(s.BurstLimit), + "HELM_QPS": strconv.FormatFloat(float64(s.QPS), 'f', 2, 32), + + // broken, these are populated from helm flags and not kubeconfig. + "HELM_KUBECONTEXT": s.KubeContext, + "HELM_KUBETOKEN": s.KubeToken, + "HELM_KUBEASUSER": s.KubeAsUser, + "HELM_KUBEASGROUPS": strings.Join(s.KubeAsGroups, ","), + "HELM_KUBEAPISERVER": s.KubeAPIServer, + "HELM_KUBECAFILE": s.KubeCaFile, + "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": strconv.FormatBool(s.KubeInsecureSkipTLSVerify), + "HELM_KUBETLS_SERVER_NAME": s.KubeTLSServerName, + } + if s.KubeConfig != "" { + envvars["KUBECONFIG"] = s.KubeConfig + } + return envvars +} + +// Namespace gets the namespace from the configuration +func (s *EnvSettings) Namespace() string { + if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { + return ns + } + if s.namespace != "" { + return s.namespace + } + return "default" +} + +// SetNamespace sets the namespace in the configuration +func (s *EnvSettings) SetNamespace(namespace string) { + s.namespace = namespace +} + +// RESTClientGetter gets the kubeconfig from EnvSettings +func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter { + return s.config +} diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go new file mode 100644 index 000000000000..9e8e243b8ca7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -0,0 +1,374 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/fileutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// VerificationStrategy describes a strategy for determining whether to verify a chart. +type VerificationStrategy int + +const ( + // VerifyNever will skip all verification of a chart. + VerifyNever VerificationStrategy = iota + // VerifyIfPossible will attempt a verification, it will not error if verification + // data is missing. But it will not stop processing if verification fails. + VerifyIfPossible + // VerifyAlways will always attempt a verification, and will fail if the + // verification fails. + VerifyAlways + // VerifyLater will fetch verification data, but not do any verification. + // This is to accommodate the case where another step of the process will + // perform verification. + VerifyLater +) + +// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos. +var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL") + +// ChartDownloader handles downloading a chart. +// +// It is capable of performing verifications on charts as well. 
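+//
+// A caller-side sketch (settings is an assumed *cli.EnvSettings; the chart
+// reference and version are illustrative):
+//
+//	dl := downloader.ChartDownloader{
+//		Out:              os.Stdout,
+//		Verify:           downloader.VerifyIfPossible,
+//		Getters:          getter.All(settings),
+//		RepositoryConfig: settings.RepositoryConfig,
+//		RepositoryCache:  settings.RepositoryCache,
+//	}
+//	saved, verification, err := dl.DownloadTo("myrepo/mychart", "1.2.3", os.TempDir())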
+type ChartDownloader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Verify indicates what verification strategy to use. + Verify VerificationStrategy + // Keyring is the keyring file used for verification. + Keyring string + // Getter collection for the operation + Getters getter.Providers + // Options provide parameters to be passed along to the Getter being initialized. + Options []getter.Option + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file. +// +// If Verify is set to VerifyNever, the verification will be nil. +// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure. +// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails. +// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it. +// +// For VerifyNever and VerifyIfPossible, the Verification may be empty. +// +// Returns a string path to the location where the file was downloaded and a verification +// (if provenance was verified), or an error if something bad happened. +func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { + u, err := c.ResolveChartVersion(ref, version) + if err != nil { + return "", nil, err + } + + g, err := c.Getters.ByScheme(u.Scheme) + if err != nil { + return "", nil, err + } + + c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) + + data, err := g.Get(u.String(), c.Options...) + if err != nil { + return "", nil, err + } + + name := filepath.Base(u.Path) + if u.Scheme == registry.OCIScheme { + idx := strings.LastIndexByte(name, ':') + name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:]) + } + + destfile := filepath.Join(dest, name) + if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil { + return destfile, nil, err + } + + // If provenance is requested, verify it. + ver := &provenance.Verification{} + if c.Verify > VerifyNever { + body, err := g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return destfile, ver, errors.Errorf("failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return destfile, ver, nil + } + provfile := destfile + ".prov" + if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil { + return destfile, nil, err + } + + if c.Verify != VerifyLater { + ver, err = VerifyChart(destfile, c.Keyring) + if err != nil { + // Fail always in this case, since it means the verification step + // failed. + return destfile, ver, err + } + } + } + return destfile, ver, nil +} + +// ResolveChartVersion resolves a chart reference to a URL. +// +// It returns the URL and sets the ChartDownloader's Options that can fetch +// the URL using the appropriate Getter. +// +// A reference may be an HTTP URL, an oci reference URL, a 'reponame/chartname' +// reference, or a local path. +// +// A version is a SemVer string (1.2.3-beta.1+f334a6789). 
+// +// - For fully qualified URLs, the version will be ignored (since URLs aren't versioned) +// - For a chart reference +// - If version is non-empty, this will return the URL for that version +// - If version is empty, this will return the URL for the latest version +// - If no version can be found, an error is returned +func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) { + u, err := url.Parse(ref) + if err != nil { + return nil, errors.Errorf("invalid chart URL format: %s", ref) + } + + if registry.IsOCI(u.String()) { + return c.RegistryClient.ValidateReference(ref, version, u) + } + + rf, err := loadRepoConfig(c.RepositoryConfig) + if err != nil { + return u, err + } + + if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { + // In this case, we have to find the parent repo that contains this chart + // URL. And this is an unfortunate problem, as it requires actually going + // through each repo cache file and finding a matching URL. But basically + // we want to find the repo in case we have special SSL cert config + // for that repo. + + rc, err := c.scanReposForURL(ref, rf) + if err != nil { + // If there is no special config, return the default HTTP client and + // swallow the error. + if err == ErrNoOwnerRepo { + // Make sure to add the ref URL as the URL for the getter + c.Options = append(c.Options, getter.WithURL(ref)) + return u, nil + } + return u, err + } + + // If we get here, we don't need to go through the next phase of looking + // up the URL. We have it already. So we just set the parameters and return. + c.Options = append( + c.Options, + getter.WithURL(rc.URL), + ) + if rc.CertFile != "" || rc.KeyFile != "" || rc.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(rc.CertFile, rc.KeyFile, rc.CAFile)) + } + if rc.Username != "" && rc.Password != "" { + c.Options = append( + c.Options, + getter.WithBasicAuth(rc.Username, rc.Password), + getter.WithPassCredentialsAll(rc.PassCredentialsAll), + ) + } + return u, nil + } + + // See if it's of the form: repo/path_to_chart + p := strings.SplitN(u.Path, "/", 2) + if len(p) < 2 { + return u, errors.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u) + } + + repoName := p[0] + chartName := p[1] + rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories) + + if err != nil { + return u, err + } + + // Now that we have the chart repository information we can use that URL + // to set the URL for the getter. + c.Options = append(c.Options, getter.WithURL(rc.URL)) + + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return u, err + } + + if r != nil && r.Config != nil { + if r.Config.CertFile != "" || r.Config.KeyFile != "" || r.Config.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile)) + } + if r.Config.Username != "" && r.Config.Password != "" { + c.Options = append(c.Options, + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) + } + } + + // Next, we need to load the index, and actually look up the chart. + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return u, errors.Wrap(err, "no cached repo found. (try 'helm repo update')") + } + + cv, err := i.Get(chartName, version) + if err != nil { + return u, errors.Wrapf(err, "chart %q matching %s not found in %s index. 
(try 'helm repo update')", chartName, version, r.Config.Name) + } + + if len(cv.URLs) == 0 { + return u, errors.Errorf("chart %q has no downloadable URLs", ref) + } + + // TODO: Seems that picking first URL is not fully correct + resolvedURL, err := repo.ResolveReferenceURL(rc.URL, cv.URLs[0]) + + if err != nil { + return u, errors.Errorf("invalid chart URL format: %s", ref) + } + + return url.Parse(resolvedURL) +} + +// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart. +// +// It assumes that a chart archive file is accompanied by a provenance file whose +// name is the archive file name plus the ".prov" extension. +func VerifyChart(path, keyring string) (*provenance.Verification, error) { + // For now, error out if it's not a tar file. + switch fi, err := os.Stat(path); { + case err != nil: + return nil, err + case fi.IsDir(): + return nil, errors.New("unpacked charts cannot be verified") + case !isTar(path): + return nil, errors.New("chart must be a tgz file") + } + + provfile := path + ".prov" + if _, err := os.Stat(provfile); err != nil { + return nil, errors.Wrapf(err, "could not load provenance file %s", provfile) + } + + sig, err := provenance.NewFromKeyring(keyring, "") + if err != nil { + return nil, errors.Wrap(err, "failed to load keyring") + } + return sig.Verify(path, provfile) +} + +// isTar tests whether the given file is a tar file. +// +// Currently, this simply checks extension, since a subsequent function will +// untar the file and validate its binary format. +func isTar(filename string) bool { + return strings.EqualFold(filepath.Ext(filename), ".tgz") +} + +func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) { + for _, rc := range cfgs { + if rc.Name == name { + if rc.URL == "" { + return nil, errors.Errorf("no URL found for repository %s", name) + } + return rc, nil + } + } + return nil, errors.Errorf("repo %s not found", name) +} + +// scanReposForURL scans all repos to find which repo contains the given URL. +// +// This will attempt to find the given URL in all of the known repositories files. +// +// If the URL is found, this will return the repo entry that contained that URL. +// +// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo +// error is returned. +// +// Other errors may be returned when repositories cannot be loaded or searched. +// +// Technically, the fact that a URL is not found in a repo is not a failure indication. +// Charts are not required to be included in an index before they are valid. So +// be mindful of this case. +// +// The same URL can technically exist in two or more repositories. This algorithm +// will return the first one it finds. Order is determined by the order of repositories +// in the repositories.yaml file. +func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry, error) { + // FIXME: This is far from optimal. Larger installations and index files will + // incur a performance hit for this type of scanning. + for _, rc := range rf.Repositories { + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return nil, err + } + + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return nil, errors.Wrap(err, "no cached repo found. 
(try 'helm repo update')") + } + + for _, entry := range i.Entries { + for _, ver := range entry { + for _, dl := range ver.URLs { + if urlutil.Equal(u, dl) { + return rc, nil + } + } + } + } + } + // This means that there is no repo file for the given URL. + return nil, ErrNoOwnerRepo +} + +func loadRepoConfig(file string) (*repo.File, error) { + r, err := repo.LoadFile(file) + if err != nil && !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + return r, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/doc.go b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go new file mode 100644 index 000000000000..8484680907a4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go @@ -0,0 +1,24 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package downloader provides a library for downloading charts. + +This package contains various tools for downloading charts from repository +servers, and then storing them in Helm-specific directory structures. This +library contains many functions that depend on a specific +filesystem layout. +*/ +package downloader diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go new file mode 100644 index 000000000000..ec4056d2753b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -0,0 +1,905 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "crypto" + "encoding/hex" + "fmt" + "io" + "log" + "net/url" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/internal/resolver" + "helm.sh/helm/v3/internal/third_party/dep/fs" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache. +// The value of Repos is missing repos. +type ErrRepoNotFound struct { + Repos []string +} + +// Error implements the error interface. +func (e ErrRepoNotFound) Error() string { + return fmt.Sprintf("no repository definition for %s", strings.Join(e.Repos, ", ")) +} + +// Manager handles the lifecycle of fetching, resolving, and storing dependencies. 
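+//
+// A caller-side sketch (chartPath and settings are assumed to exist, and the
+// chart at chartPath is unpacked):
+//
+//	man := &downloader.Manager{
+//		Out:              os.Stdout,
+//		ChartPath:        chartPath,
+//		Getters:          getter.All(settings),
+//		RepositoryConfig: settings.RepositoryConfig,
+//		RepositoryCache:  settings.RepositoryCache,
+//	}
+//	err := man.Build() // builds from the lock file, or falls back to Update()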
+type Manager struct { + // Out is used to print warnings and notifications. + Out io.Writer + // ChartPath is the path to the unpacked base chart upon which this operates. + ChartPath string + // Verification indicates whether the chart should be verified. + Verify VerificationStrategy + // Debug is the global "--debug" flag + Debug bool + // Keyring is the key ring file. + Keyring string + // SkipUpdate indicates that the repository should not be updated first. + SkipUpdate bool + // Getter collection for the operation + Getters []getter.Provider + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// Build rebuilds a local charts directory from a lockfile. +// +// If the lockfile is not present, this will run a Manager.Update() +// +// If SkipUpdate is set, this will not update the repository. +func (m *Manager) Build() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If a lock file is found, run a build from that. Otherwise, just do + // an update. + lock := c.Lock + if lock == nil { + return m.Update() + } + + // Check that all of the repos we're dependent on actually exist. + req := c.Metadata.Dependencies + + // If using apiVersion v1, calculate the hash before resolve repo names + // because resolveRepoNames will change req if req uses repo alias + // and Helm 2 calculate the digest from the original req + // Fix for: https://github.com/helm/helm/issues/7619 + var v2Sum string + if c.Metadata.APIVersion == chart.APIVersionV1 { + v2Sum, err = resolver.HashV2Req(req) + if err != nil { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") + } + } + + if _, err := m.resolveRepoNames(req); err != nil { + return err + } + + if sum, err := resolver.HashReq(req, lock.Dependencies); err != nil || sum != lock.Digest { + // If lock digest differs and chart is apiVersion v1, it maybe because the lock was built + // with Helm 2 and therefore should be checked with Helm v2 hash + // Fix for: https://github.com/helm/helm/issues/7233 + if c.Metadata.APIVersion == chart.APIVersionV1 { + log.Println("warning: a valid Helm v3 hash was not found. Checking against Helm v2 hash...") + if v2Sum != lock.Digest { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") + } + } else { + return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies") + } + } + + // Check that all of the repos we're dependent on actually exist. + if err := m.hasAllRepos(lock.Dependencies); err != nil { + return err + } + + if !m.SkipUpdate { + // For each repo in the file, update the cached copy of that repo + if err := m.UpdateRepositories(); err != nil { + return err + } + } + + // Now we need to fetch every package here into charts/ + return m.downloadAll(lock.Dependencies) +} + +// Update updates a local charts directory. +// +// It first reads the Chart.yaml file, and then attempts to +// negotiate versions based on that. It will download the versions +// from remote chart repositories unless SkipUpdate is true. +func (m *Manager) Update() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If no dependencies are found, we consider this a successful + // completion. 
+ req := c.Metadata.Dependencies + if req == nil { + return nil + } + + // Get the names of the repositories the dependencies need that Helm is + // configured to know about. + repoNames, err := m.resolveRepoNames(req) + if err != nil { + return err + } + + // For the repositories Helm is not configured to know about, ensure Helm + // has some information about them and, when possible, the index files + // locally. + // TODO(mattfarina): Repositories should be explicitly added by end users + // rather than automatic. In Helm v4 require users to add repositories. They + // should have to add them in order to make sure they are aware of the + // repositories and opt-in to any locations, for security. + repoNames, err = m.ensureMissingRepos(repoNames, req) + if err != nil { + return err + } + + // For each of the repositories Helm is configured to know about, update + // the index information locally. + if !m.SkipUpdate { + if err := m.UpdateRepositories(); err != nil { + return err + } + } + + // Now we need to find out which version of a chart best satisfies the + // dependencies in the Chart.yaml + lock, err := m.resolve(req, repoNames) + if err != nil { + return err + } + + // Now we need to fetch every package here into charts/ + if err := m.downloadAll(lock.Dependencies); err != nil { + return err + } + + // downloadAll might overwrite dependency version, recalculate lock digest + newDigest, err := resolver.HashReq(req, lock.Dependencies) + if err != nil { + return err + } + lock.Digest = newDigest + + // If the lock file hasn't changed, don't write a new one. + oldLock := c.Lock + if oldLock != nil && oldLock.Digest == lock.Digest { + return nil + } + + // Finally, we need to write the lockfile. + return writeLock(m.ChartPath, lock, c.Metadata.APIVersion == chart.APIVersionV1) +} + +func (m *Manager) loadChartDir() (*chart.Chart, error) { + if fi, err := os.Stat(m.ChartPath); err != nil { + return nil, errors.Wrapf(err, "could not find %s", m.ChartPath) + } else if !fi.IsDir() { + return nil, errors.New("only unpacked charts can be updated") + } + return loader.LoadDir(m.ChartPath) +} + +// resolve takes a list of dependencies and translates them into an exact version to download. +// +// This returns a lock file, which has all of the dependencies normalized to a specific version. +func (m *Manager) resolve(req []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + res := resolver.New(m.ChartPath, m.RepositoryCache, m.RegistryClient) + return res.Resolve(req, repoNames) +} + +// downloadAll takes a list of dependencies and downloads them into charts/ +// +// It will delete versions of the chart that exist on disk and might cause +// a conflict. +func (m *Manager) downloadAll(deps []*chart.Dependency) error { + repos, err := m.loadChartRepositories() + if err != nil { + return err + } + + destPath := filepath.Join(m.ChartPath, "charts") + tmpPath := filepath.Join(m.ChartPath, fmt.Sprintf("tmpcharts-%d", os.Getpid())) + + // Check if 'charts' directory is not actually a directory. If it does not exist, create it. 
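+	// (Downloads are staged in tmpPath and moved into destPath by safeMoveDeps
+	// only after every dependency resolves, so a failed run does not leave the
+	// charts directory half-updated.)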
+ if fi, err := os.Stat(destPath); err == nil { + if !fi.IsDir() { + return errors.Errorf("%q is not a directory", destPath) + } + } else if os.IsNotExist(err) { + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + } else { + return fmt.Errorf("unable to retrieve file info for '%s': %v", destPath, err) + } + + // Prepare tmpPath + if err := os.MkdirAll(tmpPath, 0755); err != nil { + return err + } + defer os.RemoveAll(tmpPath) + + fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps)) + var saveError error + churls := make(map[string]struct{}) + for _, dep := range deps { + // No repository means the chart is in charts directory + if dep.Repository == "" { + fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name) + // NOTE: we are only validating the local dependency conforms to the constraints. No copying to tmpPath is necessary. + chartPath := filepath.Join(destPath, dep.Name) + ch, err := loader.LoadDir(chartPath) + if err != nil { + return fmt.Errorf("unable to load chart '%s': %v", chartPath, err) + } + + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return fmt.Errorf("dependency %s has an invalid version/constraint format: %s", dep.Name, err) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return fmt.Errorf("invalid version %s for dependency %s: %s", dep.Version, dep.Name, err) + } + + if !constraint.Check(v) { + saveError = fmt.Errorf("dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version) + break + } + continue + } + if strings.HasPrefix(dep.Repository, "file://") { + if m.Debug { + fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository) + } + ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version, tmpPath) + if err != nil { + saveError = err + break + } + dep.Version = ver + continue + } + + // Any failure to resolve/download a chart should fail: + // https://github.com/helm/helm/issues/1439 + churl, username, password, insecureskiptlsverify, passcredentialsall, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + if err != nil { + saveError = errors.Wrapf(err, "could not find %s", churl) + break + } + + if _, ok := churls[churl]; ok { + fmt.Fprintf(m.Out, "Already downloaded %s from repo %s\n", dep.Name, dep.Repository) + continue + } + + fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository) + + dl := ChartDownloader{ + Out: m.Out, + Verify: m.Verify, + Keyring: m.Keyring, + RepositoryConfig: m.RepositoryConfig, + RepositoryCache: m.RepositoryCache, + RegistryClient: m.RegistryClient, + Getters: m.Getters, + Options: []getter.Option{ + getter.WithBasicAuth(username, password), + getter.WithPassCredentialsAll(passcredentialsall), + getter.WithInsecureSkipVerifyTLS(insecureskiptlsverify), + getter.WithTLSClientConfig(certFile, keyFile, caFile), + }, + } + + version := "" + if registry.IsOCI(churl) { + churl, version, err = parseOCIRef(churl) + if err != nil { + return errors.Wrapf(err, "could not parse OCI reference") + } + dl.Options = append(dl.Options, + getter.WithRegistryClient(m.RegistryClient), + getter.WithTagName(version)) + } + + if _, _, err = dl.DownloadTo(churl, version, tmpPath); err != nil { + saveError = errors.Wrapf(err, "could not download %s", churl) + break + } + + churls[churl] = struct{}{} + } + + // TODO: this should probably be refactored to be a 
[]error, so we can capture and provide more information rather than "last error wins". + if saveError == nil { + // now we can move all downloaded charts to destPath and delete outdated dependencies + if err := m.safeMoveDeps(deps, tmpPath, destPath); err != nil { + return err + } + } else { + fmt.Fprintln(m.Out, "Save error occurred: ", saveError) + return saveError + } + return nil +} + +func parseOCIRef(chartRef string) (string, string, error) { + refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`) + caps := refTagRegexp.FindStringSubmatch(chartRef) + if len(caps) != 4 { + return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef) + } + chartRef = caps[1] + tag := caps[3] + + return chartRef, tag, nil +} + +// safeMoveDep moves all dependencies in the source and moves them into dest. +// +// It does this by first matching the file name to an expected pattern, then loading +// the file to verify that it is a chart. +// +// Any charts in dest that do not exist in source are removed (barring local dependencies) +// +// Because it requires tar file introspection, it is more intensive than a basic move. +// +// This will only return errors that should stop processing entirely. Other errors +// will emit log messages or be ignored. +func (m *Manager) safeMoveDeps(deps []*chart.Dependency, source, dest string) error { + existsInSourceDirectory := map[string]bool{} + isLocalDependency := map[string]bool{} + sourceFiles, err := os.ReadDir(source) + if err != nil { + return err + } + // attempt to read destFiles; fail fast if we can't + destFiles, err := os.ReadDir(dest) + if err != nil { + return err + } + + for _, dep := range deps { + if dep.Repository == "" { + isLocalDependency[dep.Name] = true + } + } + + for _, file := range sourceFiles { + if file.IsDir() { + continue + } + filename := file.Name() + sourcefile := filepath.Join(source, filename) + destfile := filepath.Join(dest, filename) + existsInSourceDirectory[filename] = true + if _, err := loader.LoadFile(sourcefile); err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for moving: %s (Skipping)", sourcefile, err) + continue + } + // NOTE: no need to delete the dest; os.Rename replaces it. + if err := fs.RenameWithFallback(sourcefile, destfile); err != nil { + fmt.Fprintf(m.Out, "Unable to move %s to charts dir %s (Skipping)", sourcefile, err) + continue + } + } + + fmt.Fprintln(m.Out, "Deleting outdated charts") + // find all files that exist in dest that do not exist in source; delete them (outdated dependencies) + for _, file := range destFiles { + if !file.IsDir() && !existsInSourceDirectory[file.Name()] { + fname := filepath.Join(dest, file.Name()) + ch, err := loader.LoadFile(fname) + if err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)\n", fname, err) + continue + } + // local dependency - skip + if isLocalDependency[ch.Name()] { + continue + } + if err := os.Remove(fname); err != nil { + fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err) + continue + } + } + } + + return nil +} + +// hasAllRepos ensures that all of the referenced deps are in the local repo cache. +func (m *Manager) hasAllRepos(deps []*chart.Dependency) error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
+ missing := []string{} +Loop: + for _, dd := range deps { + // If repo is from local path or OCI, continue + if strings.HasPrefix(dd.Repository, "file://") || registry.IsOCI(dd.Repository) { + continue + } + + if dd.Repository == "" { + continue + } + for _, repo := range repos { + if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) { + continue Loop + } + } + missing = append(missing, dd.Repository) + } + if len(missing) > 0 { + return ErrRepoNotFound{missing} + } + return nil +} + +// ensureMissingRepos attempts to ensure the repository information for repos +// not managed by Helm is present. This takes in the repoNames Helm is configured +// to work with along with the chart dependencies. It will find the deps not +// in a known repo and attempt to ensure the data is present for steps like +// version resolution. +func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.Dependency) (map[string]string, error) { + + var ru []*repo.Entry + + for _, dd := range deps { + + // If the chart is in the local charts directory no repository needs + // to be specified. + if dd.Repository == "" { + continue + } + + // When the repoName for a dependency is known we can skip ensuring + if _, ok := repoNames[dd.Name]; ok { + continue + } + + // The generated repository name, which will result in an index being + // locally cached, has a name pattern of "helm-manager-" followed by a + // sha256 of the repo name. This assumes end users will never create + // repositories with these names pointing to other repositories. Using + // this method of naming allows the existing repository pulling and + // resolution code to do most of the work. + rn, err := key(dd.Repository) + if err != nil { + return repoNames, err + } + rn = managerKeyPrefix + rn + + repoNames[dd.Name] = rn + + // Assuming the repository is generally available. For Helm managed + // access controls the repository needs to be added through the user + // managed system. This path will work for public charts, like those + // supplied by Bitnami, but not for protected charts, like corp ones + // behind a username and pass. + ri := &repo.Entry{ + Name: rn, + URL: dd.Repository, + } + ru = append(ru, ri) + } + + // Calls to UpdateRepositories (a public function) will only update + // repositories configured by the user. Here we update repos found in + // the dependencies that are not known to the user if update skipping + // is not configured. + if !m.SkipUpdate && len(ru) > 0 { + fmt.Fprintln(m.Out, "Getting updates for unmanaged Helm repositories...") + if err := m.parallelRepoUpdate(ru); err != nil { + return repoNames, err + } + } + + return repoNames, nil +} + +// resolveRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file +// and replaces aliased repository URLs into resolved URLs in dependencies. +func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + if os.IsNotExist(err) { + return make(map[string]string), nil + } + return nil, err + } + repos := rf.Repositories + + reposMap := make(map[string]string) + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
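+	// A dependency repository may be written as "@name" or "alias:name"
+	// (rewritten below to the configured URL), as a plain URL, as a "file://"
+	// path, or as an OCI reference; only named references must already exist
+	// in the repositories file.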
+ missing := []string{} + for _, dd := range deps { + // Don't map the repository, we don't need to download chart from charts directory + if dd.Repository == "" { + continue + } + // if dep chart is from local path, verify the path is valid + if strings.HasPrefix(dd.Repository, "file://") { + if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil { + return nil, err + } + + if m.Debug { + fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository) + } + reposMap[dd.Name] = dd.Repository + continue + } + + if registry.IsOCI(dd.Repository) { + reposMap[dd.Name] = dd.Repository + continue + } + + found := false + + for _, repo := range repos { + if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) || + (strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) { + found = true + dd.Repository = repo.URL + reposMap[dd.Name] = repo.Name + break + } else if urlutil.Equal(repo.URL, dd.Repository) { + found = true + reposMap[dd.Name] = repo.Name + break + } + } + if !found { + repository := dd.Repository + // Add if URL + _, err := url.ParseRequestURI(repository) + if err == nil { + reposMap[repository] = repository + continue + } + missing = append(missing, repository) + } + } + if len(missing) > 0 { + errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) + // It is common for people to try to enter "stable" as a repository instead of the actual URL. + // For this case, let's give them a suggestion. + containsNonURL := false + for _, repo := range missing { + if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { + containsNonURL = true + } + } + if containsNonURL { + errorMessage += ` +Note that repositories must be URLs or aliases. For example, to refer to the "example" +repository, use "https://charts.example.com/" or "@example" instead of +"example". Don't forget to add the repo, too ('helm repo add').` + } + return nil, errors.New(errorMessage) + } + return reposMap, nil +} + +// UpdateRepositories updates all of the local repos to the latest. +func (m *Manager) UpdateRepositories() error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + if len(repos) > 0 { + fmt.Fprintln(m.Out, "Hang tight while we grab the latest from your chart repositories...") + // This prints warnings straight to out. + if err := m.parallelRepoUpdate(repos); err != nil { + return err + } + fmt.Fprintln(m.Out, "Update Complete. ⎈Happy Helming!⎈") + } + return nil +} + +func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { + + var wg sync.WaitGroup + for _, c := range repos { + r, err := repo.NewChartRepository(c, m.Getters) + if err != nil { + return err + } + r.CachePath = m.RepositoryCache + wg.Add(1) + go func(r *repo.ChartRepository) { + if _, err := r.DownloadIndexFile(); err != nil { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. 
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository:\n\t%s\n", r.Config.URL, err) + } else { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err) + } + } else { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. + if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.URL) + } else { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.Name) + } + } + wg.Done() + }(r) + } + wg.Wait() + + return nil +} + +// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified. +// +// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the +// newest version will be returned. +// +// repoURL is the repository to search +// +// If it finds a URL that is "relative", it will prepend the repoURL. +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, insecureskiptlsverify, passcredentialsall bool, caFile, certFile, keyFile string, err error) { + if registry.IsOCI(repoURL) { + return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, false, "", "", "", nil + } + + for _, cr := range repos { + + if urlutil.Equal(repoURL, cr.Config.URL) { + var entry repo.ChartVersions + entry, err = findEntryByName(name, cr) + if err != nil { + // TODO: Where linting is skipped in this function we should + // refactor to remove naked returns while ensuring the same + // behavior + //nolint:nakedret + return + } + var ve *repo.ChartVersion + ve, err = findVersionedEntry(version, entry) + if err != nil { + //nolint:nakedret + return + } + url, err = normalizeURL(repoURL, ve.URLs[0]) + if err != nil { + //nolint:nakedret + return + } + username = cr.Config.Username + password = cr.Config.Password + passcredentialsall = cr.Config.PassCredentialsAll + insecureskiptlsverify = cr.Config.InsecureSkipTLSverify + caFile = cr.Config.CAFile + certFile = cr.Config.CertFile + keyFile = cr.Config.KeyFile + //nolint:nakedret + return + } + } + url, err = repo.FindChartInRepoURL(repoURL, name, version, certFile, keyFile, caFile, m.Getters) + if err == nil { + return url, username, password, false, false, "", "", "", err + } + err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err) + return url, username, password, false, false, "", "", "", err +} + +// findEntryByName finds an entry in the chart repository whose name matches the given name. +// +// It returns the ChartVersions for that entry. +func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) { + for ename, entry := range cr.IndexFile.Entries { + if ename == name { + return entry, nil + } + } + return nil, errors.New("entry not found") +} + +// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints. +// +// If version is empty, the first chart found is returned. +func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) { + for _, verEntry := range vers { + if len(verEntry.URLs) == 0 { + // Not a legit entry. 
+ continue + } + + if version == "" || versionEquals(version, verEntry.Version) { + return verEntry, nil + } + } + return nil, errors.New("no matching version") +} + +func versionEquals(v1, v2 string) bool { + sv1, err := semver.NewVersion(v1) + if err != nil { + // Fallback to string comparison. + return v1 == v2 + } + sv2, err := semver.NewVersion(v2) + if err != nil { + return false + } + return sv1.Equal(sv2) +} + +func normalizeURL(baseURL, urlOrPath string) (string, error) { + u, err := url.Parse(urlOrPath) + if err != nil { + return urlOrPath, err + } + if u.IsAbs() { + return u.String(), nil + } + u2, err := url.Parse(baseURL) + if err != nil { + return urlOrPath, errors.Wrap(err, "base URL failed to parse") + } + + u2.RawPath = path.Join(u2.RawPath, urlOrPath) + u2.Path = path.Join(u2.Path, urlOrPath) + return u2.String(), nil +} + +// loadChartRepositories reads the repositories.yaml, and then builds a map of +// ChartRepositories. +// +// The key is the local name (which is only present in the repositories.yaml). +func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) { + indices := map[string]*repo.ChartRepository{} + + // Load repositories.yaml file + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return indices, errors.Wrapf(err, "failed to load %s", m.RepositoryConfig) + } + + for _, re := range rf.Repositories { + lname := re.Name + idxFile := filepath.Join(m.RepositoryCache, helmpath.CacheIndexFile(lname)) + index, err := repo.LoadIndexFile(idxFile) + if err != nil { + return indices, err + } + + // TODO: use constructor + cr := &repo.ChartRepository{ + Config: re, + IndexFile: index, + } + indices[lname] = cr + } + return indices, nil +} + +// writeLock writes a lockfile to disk +func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error { + data, err := yaml.Marshal(lock) + if err != nil { + return err + } + lockfileName := "Chart.lock" + if legacyLockfile { + lockfileName = "requirements.lock" + } + dest := filepath.Join(chartpath, lockfileName) + return os.WriteFile(dest, data, 0644) +} + +// archive a dep chart from local directory and save it into destPath +func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, error) { + if !strings.HasPrefix(repo, "file://") { + return "", errors.Errorf("wrong format: chart %s repository %s", name, repo) + } + + origPath, err := resolver.GetLocalPath(repo, chartpath) + if err != nil { + return "", err + } + + ch, err := loader.LoadDir(origPath) + if err != nil { + return "", err + } + + constraint, err := semver.NewConstraint(version) + if err != nil { + return "", errors.Wrapf(err, "dependency %s has an invalid version/constraint format", name) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return "", err + } + + if constraint.Check(v) { + _, err = chartutil.Save(ch, destPath) + return ch.Metadata.Version, err + } + + return "", errors.Errorf("can't get a valid version for dependency %s", name) +} + +// The prefix to use for cache keys created by the manager for repo names +const managerKeyPrefix = "helm-manager-" + +// key is used to turn a name, such as a repository url, into a filesystem +// safe name that is unique for querying. To accomplish this a unique hash of +// the string is used. 
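+//
+// An illustrative call:
+//
+//	name, _ := key("https://charts.example.com")
+//	// name is the hex-encoded SHA-256 of the URL: stable, unique, and
+//	// safe to use in cache file names.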
+func key(name string) (string, error) { + in := strings.NewReader(name) + hash := crypto.SHA256.New() + if _, err := io.Copy(hash, in); err != nil { + return "", nil + } + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/doc.go b/vendor/helm.sh/helm/v3/pkg/engine/doc.go new file mode 100644 index 000000000000..6b3443aaf0c3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/doc.go @@ -0,0 +1,24 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package engine implements the Go text template engine as needed for Helm. + +When Helm renders templates it does so with additional functions and different +modes (e.g., strict, lint mode). This package handles the helm specific +implementation. +*/ +package engine // import "helm.sh/helm/v3/pkg/engine" diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go new file mode 100644 index 000000000000..d8ee313e1eaa --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -0,0 +1,441 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "fmt" + "log" + "path" + "path/filepath" + "regexp" + "sort" + "strings" + "text/template" + + "github.com/pkg/errors" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" +) + +// Engine is an implementation of the Helm rendering implementation for templates. +type Engine struct { + // If strict is enabled, template rendering will fail if a template references + // a value that was not passed in. + Strict bool + // In LintMode, some 'required' template values may be missing, so don't fail + LintMode bool + // optional provider of clients to talk to the Kubernetes API + clientProvider *ClientProvider + // EnableDNS tells the engine to allow DNS lookups when rendering templates + EnableDNS bool +} + +// New creates a new instance of Engine using the passed in rest config. +func New(config *rest.Config) Engine { + var clientProvider ClientProvider = clientProviderFromConfig{config} + return Engine{ + clientProvider: &clientProvider, + } +} + +// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates. +// +// Render can be called repeatedly on the same engine. +// +// This will look in the chart's 'templates' data (e.g. the 'templates/' directory) +// and attempt to render the templates there using the values passed in. 
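+//
+// A caller-side sketch (loading the chart and preparing vals are elided):
+//
+//	rendered, err := engine.Render(chrt, vals)
+//	for name, manifest := range rendered {
+//		// name is the chart-scoped template path,
+//		// e.g. "mychart/templates/deployment.yaml"
+//	}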
+// +// Values are scoped to their templates. A dependency template will not have +// access to the values set for its parent. If chart "foo" includes chart "bar", +// "bar" will not have access to the values for "foo". +// +// Values should be prepared with something like `chartutil.ReadValues`. +// +// Values are passed through the templates according to scope. If the top-level +// chart includes the chart foo, which includes the chart bar, the values map +// will be examined for a table called "foo". If "foo" is found in vals, +// that section of the values will be passed into the "foo" chart. And if that +// section contains a value named "bar", that value will be passed on to the +// bar chart during render time. +func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + tmap := allTemplates(chrt, values) + return e.render(tmap) +} + +// Render takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. +func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + return new(Engine).Render(chrt, values) +} + +// RenderWithClient takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client. +func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { + var clientProvider ClientProvider = clientProviderFromConfig{config} + return Engine{ + clientProvider: &clientProvider, + }.Render(chrt, values) +} + +// RenderWithClientProvider takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client. +// This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed. +func RenderWithClientProvider(chrt *chart.Chart, values chartutil.Values, clientProvider ClientProvider) (map[string]string, error) { + return Engine{ + clientProvider: &clientProvider, + }.Render(chrt, values) +} + +// renderable is an object that can be rendered. +type renderable struct { + // tpl is the current template. + tpl string + // vals are the values to be supplied to the template. + vals chartutil.Values + // namespace prefix to the templates of the current chart + basePath string +} + +const warnStartDelim = "HELM_ERR_START" +const warnEndDelim = "HELM_ERR_END" +const recursionMaxNums = 1000 + +var warnRegex = regexp.MustCompile(warnStartDelim + `((?s).*)` + warnEndDelim) + +func warnWrap(warn string) string { + return warnStartDelim + warn + warnEndDelim +} + +// 'include' needs to be defined in the scope of a 'tpl' template as +// well as regular file-loaded templates.
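Before the include/tpl helpers that follow, here is a minimal sketch of driving this API from Go. It is illustrative only: it assumes a chart checked out at ./mychart, Helm's loader and chartutil packages, and hypothetical release options and values.

package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/engine"
)

func main() {
	chrt, err := loader.Load("./mychart") // hypothetical chart directory
	if err != nil {
		log.Fatal(err)
	}
	// Compose the Release/Capabilities/Values structure the engine expects.
	vals, err := chartutil.ToRenderValues(chrt,
		map[string]interface{}{"replicaCount": 2}, // hypothetical user values
		chartutil.ReleaseOptions{Name: "demo", Namespace: "default"},
		chartutil.DefaultCapabilities)
	if err != nil {
		log.Fatal(err)
	}
	out, err := engine.Render(chrt, vals) // template path -> rendered manifest
	if err != nil {
		log.Fatal(err)
	}
	for name, body := range out {
		fmt.Printf("--- %s ---\n%s\n", name, body)
	}
}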
+func includeFun(t *template.Template, includedNames map[string]int) func(string, interface{}) (string, error) { + return func(name string, data interface{}) (string, error) { + var buf strings.Builder + if v, ok := includedNames[name]; ok { + if v > recursionMaxNums { + return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name) + } + includedNames[name]++ + } else { + includedNames[name] = 1 + } + err := t.ExecuteTemplate(&buf, name, data) + includedNames[name]-- + return buf.String(), err + } +} + +// As does 'tpl', so that nested calls to 'tpl' see the templates +// defined by their enclosing contexts. +func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, interface{}) (string, error) { + return func(tpl string, vals interface{}) (string, error) { + t, err := parent.Clone() + if err != nil { + return "", errors.Wrapf(err, "cannot clone template") + } + + // Re-inject the missingkey option, see text/template issue https://github.com/golang/go/issues/43022 + // We have to go by strict from our engine configuration, as the option fields are private in Template. + // TODO: Remove workaround (and the strict parameter) once we build only with golang versions with a fix. + if strict { + t.Option("missingkey=error") + } else { + t.Option("missingkey=zero") + } + + // Re-inject 'include' so that it can close over our clone of t; + // this lets any 'define's inside tpl be 'include'd. + t.Funcs(template.FuncMap{ + "include": includeFun(t, includedNames), + "tpl": tplFun(t, includedNames, strict), + }) + + // We need a .New template, as template text which is just blanks + // or comments after parsing out defines just adds new named + // template definitions without changing the main template. + // https://pkg.go.dev/text/template#Template.Parse + // Use the parent's name for lack of a better way to identify the tpl + // text string. (Maybe we could use a hash appended to the name?) + t, err = t.New(parent.Name()).Parse(tpl) + if err != nil { + return "", errors.Wrapf(err, "cannot parse template %q", tpl) + } + + var buf strings.Builder + if err := t.Execute(&buf, vals); err != nil { + return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl) + } + + // See the comment in render explaining the "<no value>" hack. + return strings.ReplaceAll(buf.String(), "<no value>", ""), nil + } +} + +// initFunMap creates the Engine's FuncMap and adds context-specific functions. +func (e Engine) initFunMap(t *template.Template) { + funcMap := funcMap() + includedNames := make(map[string]int) + + // Add the template-rendering functions here so we can close over t.
+ funcMap["include"] = includeFun(t, includedNames) + funcMap["tpl"] = tplFun(t, includedNames, e.Strict) + + // Add the `required` function here so we can use lintMode + funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { + if val == nil { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.New(warnWrap(warn)) + } else if _, ok := val.(string); ok { + if val == "" { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.New(warnWrap(warn)) + } + } + return val, nil + } + + // Override sprig fail function for linting and wrapping message + funcMap["fail"] = func(msg string) (string, error) { + if e.LintMode { + // Don't fail when linting + log.Printf("[INFO] Fail: %s", msg) + return "", nil + } + return "", errors.New(warnWrap(msg)) + } + + // If we are not linting and have a cluster connection, provide a Kubernetes-backed + // implementation. + if !e.LintMode && e.clientProvider != nil { + funcMap["lookup"] = newLookupFunction(*e.clientProvider) + } + + // When DNS lookups are not enabled override the sprig function and return + // an empty string. + if !e.EnableDNS { + funcMap["getHostByName"] = func(_ string) string { + return "" + } + } + + t.Funcs(funcMap) +} + +// render takes a map of templates/values and renders them. +func (e Engine) render(tpls map[string]renderable) (rendered map[string]string, err error) { + // Basically, what we do here is start with an empty parent template and then + // build up a list of templates -- one for each file. Once all of the templates + // have been parsed, we loop through again and execute every template. + // + // The idea with this process is to make it possible for more complex templates + // to share common blocks, but to make the entire thing feel like a file-based + // template engine. + defer func() { + if r := recover(); r != nil { + err = errors.Errorf("rendering template failed: %v", r) + } + }() + t := template.New("gotpl") + if e.Strict { + t.Option("missingkey=error") + } else { + // Not that zero will attempt to add default values for types it knows, + // but will still emit for others. We mitigate that later. + t.Option("missingkey=zero") + } + + e.initFunMap(t) + + // We want to parse the templates in a predictable order. The order favors + // higher-level (in file system) templates over deeply nested templates. + keys := sortTemplates(tpls) + + for _, filename := range keys { + r := tpls[filename] + if _, err := t.New(filename).Parse(r.tpl); err != nil { + return map[string]string{}, cleanupParseError(filename, err) + } + } + + rendered = make(map[string]string, len(keys)) + for _, filename := range keys { + // Don't render partials. We don't care out the direct output of partials. + // They are only included from other templates. + if strings.HasPrefix(path.Base(filename), "_") { + continue + } + // At render time, add information about the template that is being rendered. + vals := tpls[filename].vals + vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath} + var buf strings.Builder + if err := t.ExecuteTemplate(&buf, filename, vals); err != nil { + return map[string]string{}, cleanupExecError(filename, err) + } + + // Work around the issue where Go will emit "" even if Options(missing=zero) + // is set. 
Since missing=error will never get here, we do not need to handle + // the Strict case. + rendered[filename] = strings.ReplaceAll(buf.String(), "<no value>", "") + } + + return rendered, nil +} + +func cleanupParseError(filename string, err error) error { + tokens := strings.Split(err.Error(), ": ") + if len(tokens) == 1 { + // This might happen if a non-templating error occurs + return fmt.Errorf("parse error in (%s): %s", filename, err) + } + // The first token is "template" + // The second token is either "filename:lineno" or "filename:lineNo:columnNo" + location := tokens[1] + // The remaining tokens make up a stacktrace-like chain, ending with the relevant error + errMsg := tokens[len(tokens)-1] + return fmt.Errorf("parse error at (%s): %s", string(location), errMsg) +} + +func cleanupExecError(filename string, err error) error { + if _, isExecError := err.(template.ExecError); !isExecError { + return err + } + + tokens := strings.SplitN(err.Error(), ": ", 3) + if len(tokens) != 3 { + // This might happen if a non-templating error occurs + return fmt.Errorf("execution error in (%s): %s", filename, err) + } + + // The first token is "template" + // The second token is either "filename:lineno" or "filename:lineNo:columnNo" + location := tokens[1] + + parts := warnRegex.FindStringSubmatch(tokens[2]) + if len(parts) >= 2 { + return fmt.Errorf("execution error at (%s): %s", string(location), parts[1]) + } + + return err +} + +func sortTemplates(tpls map[string]renderable) []string { + keys := make([]string, len(tpls)) + i := 0 + for key := range tpls { + keys[i] = key + i++ + } + sort.Sort(sort.Reverse(byPathLen(keys))) + return keys +} + +type byPathLen []string + +func (p byPathLen) Len() int { return len(p) } + +func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] } + +func (p byPathLen) Less(i, j int) bool { + a, b := p[i], p[j] + ca, cb := strings.Count(a, "/"), strings.Count(b, "/") + if ca == cb { + return strings.Compare(a, b) == -1 + } + return ca < cb +} + +// allTemplates returns all templates for a chart and its dependencies. +// +// As it goes, it also prepares the values in a scope-sensitive manner. +func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable { + templates := make(map[string]renderable) + recAllTpls(c, templates, vals) + return templates +} + +// recAllTpls recurses through the templates in a chart. +// +// As it recurses, it also sets the values to be appropriate for the template +// scope. +func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} { + subCharts := make(map[string]interface{}) + chartMetaData := struct { + chart.Metadata + IsRoot bool + }{*c.Metadata, c.IsRoot()} + + next := map[string]interface{}{ + "Chart": chartMetaData, + "Files": newFiles(c.Files), + "Release": vals["Release"], + "Capabilities": vals["Capabilities"], + "Values": make(chartutil.Values), + "Subcharts": subCharts, + } + + // If there is a {{.Values.ThisChart}} in the parent metadata, + // copy that into the {{.Values}} for this template. + if c.IsRoot() { + next["Values"] = vals["Values"] + } else if vs, err := vals.Table("Values."
+ c.Name()); err == nil { + next["Values"] = vs + } + + for _, child := range c.Dependencies() { + subCharts[child.Name()] = recAllTpls(child, templates, next) + } + + newParentID := c.ChartFullPath() + for _, t := range c.Templates { + if t == nil { + continue + } + if !isTemplateValid(c, t.Name) { + continue + } + templates[path.Join(newParentID, t.Name)] = renderable{ + tpl: string(t.Data), + vals: next, + basePath: path.Join(newParentID, "templates"), + } + } + + return next +} + +// isTemplateValid returns true if the template is valid for the chart type +func isTemplateValid(ch *chart.Chart, templateName string) bool { + if isLibraryChart(ch) { + return strings.HasPrefix(filepath.Base(templateName), "_") + } + return true +} + +// isLibraryChart returns true if the chart is a library chart +func isLibraryChart(c *chart.Chart) bool { + return strings.EqualFold(c.Metadata.Type, "library") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/files.go b/vendor/helm.sh/helm/v3/pkg/engine/files.go new file mode 100644 index 000000000000..f2cfdb3f3824 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/files.go @@ -0,0 +1,165 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "encoding/base64" + "path" + "strings" + + "github.com/gobwas/glob" + + "helm.sh/helm/v3/pkg/chart" +) + +// files is a map of files in a chart that can be accessed from a template. +type files map[string][]byte + +// newFiles creates a new files object from chart files. +// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files. +func newFiles(from []*chart.File) files { + files := make(map[string][]byte) + for _, f := range from { + files[f.Name] = f.Data + } + return files +} + +// GetBytes gets a file by path. +// +// The returned data is raw. In a template context, this is identical to calling +// {{index .Files $path}}. +// +// This is intended to be accessed from within a template, so a missing key returns +// an empty []byte. +func (f files) GetBytes(name string) []byte { + if v, ok := f[name]; ok { + return v + } + return []byte{} +} + +// Get returns a string representation of the given file. +// +// Fetch the contents of a file as a string. It is designed to be called in a +// template. +// +// {{.Files.Get "foo"}} +func (f files) Get(name string) string { + return string(f.GetBytes(name)) +} + +// Glob takes a glob pattern and returns another files object only containing +// matched files. +// +// This is designed to be called from a template.
+// +// {{ range $name, $content := .Files.Glob("foo/**") }} +// {{ $name }}: | +// {{ .Files.Get($name) | indent 4 }}{{ end }} +func (f files) Glob(pattern string) files { + g, err := glob.Compile(pattern, '/') + if err != nil { + g, _ = glob.Compile("**") + } + + nf := newFiles(nil) + for name, contents := range f { + if g.Match(name) { + nf[name] = contents + } + } + + return nf +} + +// AsConfig flattens a Files group into a YAML map suitable for +// including in the 'data' section of a Kubernetes ConfigMap definition. +// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// +// {{ .Files.Glob("config/**").AsConfig() | indent 4 }} +func (f files) AsConfig() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + // Explicitly convert file contents to strings and keys to base file names + for k, v := range f { + m[path.Base(k)] = string(v) + } + + return toYAML(m) +} + +// AsSecrets returns the base64-encoded value of a Files object suitable for +// including in the 'data' section of a Kubernetes Secret definition. +// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// +// {{ .Files.Glob("secrets/*").AsSecrets() | indent 4 }} +func (f files) AsSecrets() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + for k, v := range f { + m[path.Base(k)] = base64.StdEncoding.EncodeToString(v) + } + + return toYAML(m) +} + +// Lines returns each line of a named file (split by "\n") as a slice, so it can +// be ranged over in your templates. +// +// This is designed to be called from a template. +// +// {{ range .Files.Lines "foo/bar.html" }} +// {{ . }}{{ end }} +func (f files) Lines(path string) []string { + if f == nil || f[path] == nil { + return []string{} + } + s := string(f[path]) + if len(s) > 0 && s[len(s)-1] == '\n' { + s = s[:len(s)-1] + } + return strings.Split(s, "\n") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go new file mode 100644 index 000000000000..d03a818c28e8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go @@ -0,0 +1,207 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig/v3" + "sigs.k8s.io/yaml" + goYaml "sigs.k8s.io/yaml/goyaml.v3" +) + +// funcMap returns a mapping of all of the functions that Engine has. +// +// Because some functions are late-bound (e.g. contain context-sensitive +// data), the functions may not all perform identically outside of an Engine +// as they will inside of an Engine. +// +// Known late-bound functions: +// +// - "include" +// - "tpl" +// +// These are late-bound in Engine.Render(). The +// version included in the FuncMap is a placeholder. +func funcMap() template.FuncMap { + f := sprig.TxtFuncMap() + delete(f, "env") + delete(f, "expandenv") + + // Add some extra functionality + extra := template.FuncMap{ + "toToml": toTOML, + "fromToml": fromTOML, + "toYaml": toYAML, + "toYamlPretty": toYAMLPretty, + "fromYaml": fromYAML, + "fromYamlArray": fromYAMLArray, + "toJson": toJSON, + "fromJson": fromJSON, + "fromJsonArray": fromJSONArray, + + // This is a placeholder for the "include" function, which is + // late-bound to a template. By declaring it here, we preserve the + // integrity of the linter. + "include": func(string, interface{}) string { return "not implemented" }, + "tpl": func(string, interface{}) interface{} { return "not implemented" }, + "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil }, + // Provide a placeholder for the "lookup" function, which requires a kubernetes + // connection. + "lookup": func(string, string, string, string) (map[string]interface{}, error) { + return map[string]interface{}{}, nil + }, + } + + for k, v := range extra { + f[k] = v + } + + return f +} + +// toYAML takes an interface, marshals it to yaml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toYAML(v interface{}) string { + data, err := yaml.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(string(data), "\n") +} + +func toYAMLPretty(v interface{}) string { + var data bytes.Buffer + encoder := goYaml.NewEncoder(&data) + encoder.SetIndent(2) + err := encoder.Encode(v) + + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(data.String(), "\n") +} + +// fromYAML converts a YAML document into a map[string]interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromYAML(str string) map[string]interface{} { + m := map[string]interface{}{} + + if err := yaml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromYAMLArray converts a YAML array into a []interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. 
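As a quick illustration of the YAML helpers above: since these functions are unexported, imagine the following fragment inside package engine itself; the input values are hypothetical.

// Conceptual sketch only (toYAML/fromYAML are unexported to this package).
func exampleYAMLHelpers() {
	in := map[string]interface{}{"enabled": true, "tag": "v1.2.3"}
	fmt.Println(toYAML(in))
	// enabled: true
	// tag: v1.2.3

	bad := fromYAML("key: [unclosed") // invalid YAML is tolerated:
	fmt.Println(bad["Error"])         // the parse error surfaces under the "Error" key
}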
+func fromYAMLArray(str string) []interface{} { + a := []interface{}{} + + if err := yaml.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} + +// toTOML takes an interface, marshals it to toml, and returns a string. It will +// always return a string, even on marshal error (in that case, the error text). +// +// This is designed to be called from a template. +func toTOML(v interface{}) string { + b := bytes.NewBuffer(nil) + e := toml.NewEncoder(b) + err := e.Encode(v) + if err != nil { + return err.Error() + } + return b.String() +} + +// fromTOML converts a TOML document into a map[string]interface{}. +// +// This is not a general-purpose TOML parser, and will not parse all valid +// TOML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromTOML(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := toml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// toJSON takes an interface, marshals it to json, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toJSON(v interface{}) string { + data, err := json.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return string(data) +} + +// fromJSON converts a JSON document into a map[string]interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromJSON(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := json.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromJSONArray converts a JSON array into a []interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromJSONArray(str string) []interface{} { + a := []interface{}{} + + if err := json.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go new file mode 100644 index 000000000000..75e85098d163 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -0,0 +1,143 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package engine + +import ( + "context" + "log" + "strings" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) + +// NewLookupFunction returns a function for looking up objects in the cluster. +// +// If the resource does not exist, no error is raised. +// +// This function is considered deprecated, and will be renamed in Helm 4. It will no +// longer be a public function. +func NewLookupFunction(config *rest.Config) lookupFunc { + return newLookupFunction(clientProviderFromConfig{config: config}) +} + +type ClientProvider interface { + // GetClientFor returns a dynamic.NamespaceableResourceInterface suitable for interacting with resources + // corresponding to the provided apiVersion and kind, as well as a boolean indicating whether the resources + // are namespaced. + GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) +} + +type clientProviderFromConfig struct { + config *rest.Config +} + +func (c clientProviderFromConfig) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) { + return getDynamicClientOnKind(apiVersion, kind, c.config) +} + +func newLookupFunction(clientProvider ClientProvider) lookupFunc { + return func(apiversion string, kind string, namespace string, name string) (map[string]interface{}, error) { + var client dynamic.ResourceInterface + c, namespaced, err := clientProvider.GetClientFor(apiversion, kind) + if err != nil { + return map[string]interface{}{}, err + } + if namespaced && namespace != "" { + client = c.Namespace(namespace) + } else { + client = c + } + if name != "" { + // this will return a single object + obj, err := client.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. + return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } + // this will return a list + obj, err := client.List(context.Background(), metav1.ListOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. + return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } +} + +// getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced. 
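A usage sketch for the lookup plumbing above; it assumes a kubeconfig at a hypothetical path and uses the public NewLookupFunction wrapper. A missing object yields an empty map rather than an error, which is what lets templates write `if not (lookup ...)`.

package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/engine"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	lookup := engine.NewLookupFunction(cfg)
	// Fetch a single object; passing an empty name would list instead.
	svc, err := lookup("v1", "Service", "default", "kubernetes")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(svc["metadata"]) // unstructured content, or {} if the object is absent
}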
+func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) { + gvk := schema.FromAPIVersionAndKind(apiversion, kind) + apiRes, err := getAPIResourceForGVK(gvk, config) + if err != nil { + log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) + } + gvr := schema.GroupVersionResource{ + Group: apiRes.Group, + Version: apiRes.Version, + Resource: apiRes.Name, + } + intf, err := dynamic.NewForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to get dynamic client %s", err) + return nil, false, err + } + res := intf.Resource(gvr) + return res, apiRes.Namespaced, nil +} + +func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) { + res := metav1.APIResource{} + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to create discovery client %s", err) + return res, err + } + resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + return res, err + } + for _, resource := range resList.APIResources { + // If a resource contains a "/" it's referencing a subresource. We don't support subresources for now. + if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") { + res = resource + res.Group = gvk.Group + res.Version = gvk.Version + break + } + } + return res, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/doc.go b/vendor/helm.sh/helm/v3/pkg/getter/doc.go new file mode 100644 index 000000000000..11cf6153eb29 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/doc.go @@ -0,0 +1,22 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package getter provides a generalized tool for fetching data by scheme. + +This provides a method by which the plugin system can load arbitrary protocol +handlers based upon a URL scheme. +*/ +package getter diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go new file mode 100644 index 000000000000..1acb2093dc4c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -0,0 +1,220 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package getter + +import ( + "bytes" + "net/http" + "time" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/registry" +) + +// options are generic parameters to be provided to the getter during instantiation. +// +// Getters may or may not ignore these parameters as they are passed in. +type options struct { + url string + certFile string + keyFile string + caFile string + unTar bool + insecureSkipVerifyTLS bool + plainHTTP bool + acceptHeader string + username string + password string + passCredentialsAll bool + userAgent string + version string + registryClient *registry.Client + timeout time.Duration + transport *http.Transport +} + +// Option allows specifying various settings configurable by the user for overriding the defaults +// used when performing Get operations with the Getter. +type Option func(*options) + +// WithURL informs the getter of the server name that will be used when fetching objects. Used in conjunction with +// WithTLSClientConfig to set the TLSClientConfig's server name. +func WithURL(url string) Option { + return func(opts *options) { + opts.url = url + } +} + +// WithAcceptHeader sets the request's Accept header as some REST APIs serve multiple content types +func WithAcceptHeader(header string) Option { + return func(opts *options) { + opts.acceptHeader = header + } +} + +// WithBasicAuth sets the request's Authorization header to use the provided credentials +func WithBasicAuth(username, password string) Option { + return func(opts *options) { + opts.username = username + opts.password = password + } +} + +func WithPassCredentialsAll(pass bool) Option { + return func(opts *options) { + opts.passCredentialsAll = pass + } +} + +// WithUserAgent sets the request's User-Agent header to use the provided agent name. +func WithUserAgent(userAgent string) Option { + return func(opts *options) { + opts.userAgent = userAgent + } +} + +// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked +func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option { + return func(opts *options) { + opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS + } +} + +// WithTLSClientConfig sets the client auth with the provided credentials. +func WithTLSClientConfig(certFile, keyFile, caFile string) Option { + return func(opts *options) { + opts.certFile = certFile + opts.keyFile = keyFile + opts.caFile = caFile + } +} + +func WithPlainHTTP(plainHTTP bool) Option { + return func(opts *options) { + opts.plainHTTP = plainHTTP + } +} + +// WithTimeout sets the timeout for requests +func WithTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.timeout = timeout + } +} + +func WithTagName(tagname string) Option { + return func(opts *options) { + opts.version = tagname + } +} + +func WithRegistryClient(client *registry.Client) Option { + return func(opts *options) { + opts.registryClient = client + } +} + +func WithUntar() Option { + return func(opts *options) { + opts.unTar = true + } +} + +// WithTransport sets the http.Transport to allow overwriting the HTTPGetter default. +func WithTransport(transport *http.Transport) Option { + return func(opts *options) { + opts.transport = transport + } +} + +// Getter is an interface to support GET to the specified URL.
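To show how these options compose, here is a sketch that resolves a Getter by scheme (via All and ByScheme, defined just below) and fetches a repository index. The URL and user agent are hypothetical.

package main

import (
	"fmt"
	"log"
	"time"

	"helm.sh/helm/v3/pkg/cli"
	"helm.sh/helm/v3/pkg/getter"
)

func main() {
	settings := cli.New()
	g, err := getter.All(settings).ByScheme("https")
	if err != nil {
		log.Fatal(err)
	}
	buf, err := g.Get("https://charts.example.com/index.yaml", // hypothetical repo
		getter.WithTimeout(30*time.Second),
		getter.WithUserAgent("my-tool/0.1"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len(), "bytes fetched")
}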
+type Getter interface { + // Get file content by url string + Get(url string, options ...Option) (*bytes.Buffer, error) +} + +// Constructor is the function for every getter which creates a specific instance +// according to the configuration. +type Constructor func(options ...Option) (Getter, error) + +// Provider represents any getter and the schemes that it supports. +// +// For example, an HTTP provider may provide one getter that handles both +// 'http' and 'https' schemes. +type Provider struct { + Schemes []string + New Constructor +} + +// Provides returns true if the given scheme is supported by this Provider. +func (p Provider) Provides(scheme string) bool { + for _, i := range p.Schemes { + if i == scheme { + return true + } + } + return false +} + +// Providers is a collection of Provider objects. +type Providers []Provider + +// ByScheme returns a Provider that handles the given scheme. +// +// If no provider handles this scheme, this will return an error. +func (p Providers) ByScheme(scheme string) (Getter, error) { + for _, pp := range p { + if pp.Provides(scheme) { + return pp.New() + } + } + return nil, errors.Errorf("scheme %q not supported", scheme) +} + +const ( + // This default references curl's default connection timeout. + // https://github.com/curl/curl/blob/master/lib/connect.h#L40C21-L40C21 + // Helm commands are usually executed manually, so to keep the waiting time acceptable the entire request time is capped at 120s. + DefaultHTTPTimeout = 120 +) + +var defaultOptions = []Option{WithTimeout(time.Second * DefaultHTTPTimeout)} + +var httpProvider = Provider{ + Schemes: []string{"http", "https"}, + New: func(options ...Option) (Getter, error) { + options = append(options, defaultOptions...) + return NewHTTPGetter(options...) + }, +} + +var ociProvider = Provider{ + Schemes: []string{registry.OCIScheme}, + New: NewOCIGetter, +} + +// All finds all of the registered getters as a list of Provider instances. +// Currently, the built-in getters and the discovered plugins with downloader +// notations are collected. +func All(settings *cli.EnvSettings) Providers { + result := Providers{httpProvider, ociProvider} + pluginDownloaders, _ := collectPlugins(settings) + result = append(result, pluginDownloaders...) + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go new file mode 100644 index 000000000000..df3dcd91093e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -0,0 +1,161 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package getter + +import ( + "bytes" + "crypto/tls" + "io" + "net/http" + "net/url" + "sync" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/internal/version" +) + +// HTTPGetter is the default HTTP(/S) backend handler +type HTTPGetter struct { + opts options + transport *http.Transport + once sync.Once +} + +// Get performs a Get from repo.Getter and returns the body. +func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&g.opts) + } + return g.get(href) +} + +func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { + // Set a helm specific user agent so that a repo server and metrics can + // separate helm calls from other tools interacting with repos. + req, err := http.NewRequest(http.MethodGet, href, nil) + if err != nil { + return nil, err + } + + if g.opts.acceptHeader != "" { + req.Header.Set("Accept", g.opts.acceptHeader) + } + + req.Header.Set("User-Agent", version.GetUserAgent()) + if g.opts.userAgent != "" { + req.Header.Set("User-Agent", g.opts.userAgent) + } + + // Before setting the basic auth credentials, make sure the URL associated + // with the basic auth is the one being fetched. + u1, err := url.Parse(g.opts.url) + if err != nil { + return nil, errors.Wrap(err, "Unable to parse getter URL") + } + u2, err := url.Parse(href) + if err != nil { + return nil, errors.Wrap(err, "Unable to parse URL getting from") + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if g.opts.username != "" && g.opts.password != "" { + req.SetBasicAuth(g.opts.username, g.opts.password) + } + } + + client, err := g.httpClient() + if err != nil { + return nil, err + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.Errorf("failed to fetch %s : %s", href, resp.Status) + } + + buf := bytes.NewBuffer(nil) + _, err = io.Copy(buf, resp.Body) + return buf, err +} + +// NewHTTPGetter constructs a valid http/https client as a Getter +func NewHTTPGetter(options ...Option) (Getter, error) { + var client HTTPGetter + + for _, opt := range options { + opt(&client.opts) + } + + return &client, nil +} + +func (g *HTTPGetter) httpClient() (*http.Client, error) { + if g.opts.transport != nil { + return &http.Client{ + Transport: g.opts.transport, + Timeout: g.opts.timeout, + }, nil + } + + g.once.Do(func() { + g.transport = &http.Transport{ + DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + } + }) + + if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS { + tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile, g.opts.insecureSkipVerifyTLS) + if err != nil { + return nil, errors.Wrap(err, "can't create TLS config for client") + } + + sni, err := urlutil.ExtractHostname(g.opts.url) + if err != nil { + return nil, err + } + tlsConf.ServerName = sni + + g.transport.TLSClientConfig = tlsConf + } + + if g.opts.insecureSkipVerifyTLS { + if g.transport.TLSClientConfig == nil { + g.transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } else { + g.transport.TLSClientConfig.InsecureSkipVerify = true + } + } + + client 
:= &http.Client{ + Transport: g.transport, + Timeout: g.opts.timeout, + } + + return client, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go new file mode 100644 index 000000000000..5b0522395c6d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go @@ -0,0 +1,160 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "net" + "net/http" + "path" + "strings" + "sync" + "time" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/registry" +) + +// OCIGetter is the default OCI registry backend handler +type OCIGetter struct { + opts options + transport *http.Transport + once sync.Once +} + +// Get performs a Get from repo.Getter and returns the body. +func (g *OCIGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&g.opts) + } + return g.get(href) +} + +func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { + client := g.opts.registryClient + // If the user has already provided a configured registry client, use it; + // this is particularly useful when the user has their own way of handling client credentials. + if client == nil { + c, err := g.newRegistryClient() + if err != nil { + return nil, err + } + client = c + } + + ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)) + + if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") { + ref = fmt.Sprintf("%s:%s", ref, version) + } + var pullOpts []registry.PullOption + requestingProv := strings.HasSuffix(ref, ".prov") + if requestingProv { + ref = strings.TrimSuffix(ref, ".prov") + pullOpts = append(pullOpts, + registry.PullOptWithChart(false), + registry.PullOptWithProv(true)) + } + + result, err := client.Pull(ref, pullOpts...)
+ if err != nil { + return nil, err + } + + if requestingProv { + return bytes.NewBuffer(result.Prov.Data), nil + } + return bytes.NewBuffer(result.Chart.Data), nil +} + +// NewOCIGetter constructs a valid OCI registry client as a Getter +func NewOCIGetter(ops ...Option) (Getter, error) { + var client OCIGetter + + for _, opt := range ops { + opt(&client.opts) + } + + return &client, nil +} + +func (g *OCIGetter) newRegistryClient() (*registry.Client, error) { + if g.opts.transport != nil { + client, err := registry.NewClient( + registry.ClientOptHTTPClient(&http.Client{ + Transport: g.opts.transport, + Timeout: g.opts.timeout, + }), + ) + if err != nil { + return nil, err + } + return client, nil + } + + g.once.Do(func() { + g.transport = &http.Transport{ + // From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87 + DisableCompression: true, + DialContext: (&net.Dialer{ + // By default we wrap the transport in retries, so reduce the + // default dial timeout to 5s to avoid 5x 30s of connection + // timeouts when doing the "ping" on certain http registries. + Timeout: 5 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + Proxy: http.ProxyFromEnvironment, + } + }) + + if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS { + tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile, g.opts.insecureSkipVerifyTLS) + if err != nil { + return nil, fmt.Errorf("can't create TLS config for client: %w", err) + } + + sni, err := urlutil.ExtractHostname(g.opts.url) + if err != nil { + return nil, err + } + tlsConf.ServerName = sni + + g.transport.TLSClientConfig = tlsConf + } + + opts := []registry.ClientOption{registry.ClientOptHTTPClient(&http.Client{ + Transport: g.transport, + Timeout: g.opts.timeout, + })} + if g.opts.plainHTTP { + opts = append(opts, registry.ClientOptPlainHTTP()) + } + + client, err := registry.NewClient(opts...) + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go new file mode 100644 index 000000000000..a371b52eb85a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go @@ -0,0 +1,110 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/plugin" +) + +// collectPlugins scans for getter plugins. +// This will load plugins according to the CLI settings.
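A sketch of the invocation contract that collectPlugins and pluginGetter (below) implement. For a plugin whose plugin.yaml declares a downloader command and protocols, fetching a URL in one of those protocols reduces to roughly the following; the plugin name, paths, URL, and credentials are all hypothetical stand-ins.

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	pluginDir := "/home/user/.local/share/helm/plugins/mygetter" // hypothetical plugin dir
	certFile, keyFile, caFile := "", "", ""
	cmd := exec.Command(filepath.Join(pluginDir, "bin", "mygetter"),
		certFile, keyFile, caFile, "myproto://host/chart") // TLS paths and URL as argv
	cmd.Env = append(os.Environ(),
		"HELM_PLUGIN_USERNAME=user",   // credentials travel via the environment,
		"HELM_PLUGIN_PASSWORD=secret") // not via argv
	var out bytes.Buffer
	cmd.Stdout = &out // the plugin writes the fetched payload to stdout
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Len(), "bytes fetched")
}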
+func collectPlugins(settings *cli.EnvSettings) (Providers, error) { + plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + if err != nil { + return nil, err + } + var result Providers + for _, plugin := range plugins { + for _, downloader := range plugin.Metadata.Downloaders { + result = append(result, Provider{ + Schemes: downloader.Protocols, + New: NewPluginGetter( + downloader.Command, + settings, + plugin.Metadata.Name, + plugin.Dir, + ), + }) + } + } + return result, nil +} + +// pluginGetter is a generic type to invoke custom downloaders, +// implemented in plugins. +type pluginGetter struct { + command string + settings *cli.EnvSettings + name string + base string + opts options +} + +func (p *pluginGetter) setupOptionsEnv(env []string) []string { + env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll)) + return env +} + +// Get runs downloader plugin command +func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&p.opts) + } + commands := strings.Split(p.command, " ") + argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href) + prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) + plugin.SetupPluginEnv(p.settings, p.name, p.base) + prog.Env = p.setupOptionsEnv(os.Environ()) + buf := bytes.NewBuffer(nil) + prog.Stdout = buf + prog.Stderr = os.Stderr + if err := prog.Run(); err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + os.Stderr.Write(eerr.Stderr) + return nil, errors.Errorf("plugin %q exited with error", p.command) + } + return nil, err + } + return buf, nil +} + +// NewPluginGetter constructs a valid plugin getter +func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor { + return func(options ...Option) (Getter, error) { + result := &pluginGetter{ + command: command, + settings: settings, + name: name, + base: base, + } + for _, opt := range options { + opt(&result.opts) + } + return result, nil + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/home.go b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go new file mode 100644 index 000000000000..bd43e8890c8f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go @@ -0,0 +1,44 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package helmpath calculates filesystem paths to Helm's configuration, cache and data. +package helmpath + +// This helper builds paths to Helm's configuration, cache and data paths. +const lp = lazypath("helm") + +// ConfigPath returns the path where Helm stores configuration. +func ConfigPath(elem ...string) string { return lp.configPath(elem...) } + +// CachePath returns the path where Helm stores cached objects. +func CachePath(elem ...string) string { return lp.cachePath(elem...) 
} + +// DataPath returns the path where Helm stores data. +func DataPath(elem ...string) string { return lp.dataPath(elem...) } + +// CacheIndexFile returns the path to an index for the given named repository. +func CacheIndexFile(name string) string { + if name != "" { + name += "-" + } + return name + "index.yaml" +} + +// CacheChartsFile returns the path to a text file listing all the charts +// within the given named repository. +func CacheChartsFile(name string) string { + if name != "" { + name += "-" + } + return name + "charts.txt" +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go new file mode 100644 index 000000000000..6b4f1fc7703f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go @@ -0,0 +1,72 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helmpath + +import ( + "os" + "path/filepath" + + "helm.sh/helm/v3/pkg/helmpath/xdg" +) + +const ( + // CacheHomeEnvVar is the environment variable used by Helm + // for the cache directory. When no value is set a default is used. + CacheHomeEnvVar = "HELM_CACHE_HOME" + + // ConfigHomeEnvVar is the environment variable used by Helm + // for the config directory. When no value is set a default is used. + ConfigHomeEnvVar = "HELM_CONFIG_HOME" + + // DataHomeEnvVar is the environment variable used by Helm + // for the data directory. When no value is set a default is used. + DataHomeEnvVar = "HELM_DATA_HOME" +) + +// lazypath is a lazy-loaded path buffer for the XDG base directory specification. +type lazypath string + +func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string { + + // There is an order to checking for a path. + // 1. See if a Helm specific environment variable has been set. + // 2. Check if an XDG environment variable is set + // 3. Fall back to a default + base := os.Getenv(helmEnvVar) + if base != "" { + return filepath.Join(base, filepath.Join(elem...)) + } + base = os.Getenv(xdgEnvVar) + if base == "" { + base = defaultFn() + } + return filepath.Join(base, string(l), filepath.Join(elem...)) +} + +// cachePath defines the base directory relative to which user specific non-essential data files +// should be stored. +func (l lazypath) cachePath(elem ...string) string { + return l.path(CacheHomeEnvVar, xdg.CacheHomeEnvVar, cacheHome, filepath.Join(elem...)) +} + +// configPath defines the base directory relative to which user specific configuration files should +// be stored. +func (l lazypath) configPath(elem ...string) string { + return l.path(ConfigHomeEnvVar, xdg.ConfigHomeEnvVar, configHome, filepath.Join(elem...)) +} + +// dataPath defines the base directory relative to which user specific data files should be stored. 
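A sketch of the resolution order implemented by path() above, with hypothetical environment values:

package main

import (
	"fmt"
	"os"

	"helm.sh/helm/v3/pkg/helmpath"
)

func main() {
	// Hypothetical environment: no Helm-specific override, XDG variable set.
	os.Unsetenv("HELM_CACHE_HOME")
	os.Setenv("XDG_CACHE_HOME", "/tmp/xdg-cache")
	fmt.Println(helmpath.CachePath("repository"))
	// -> /tmp/xdg-cache/helm/repository
	// With HELM_CACHE_HOME=/opt/helmcache set instead, the result would be
	// /opt/helmcache/repository (no extra "helm" segment is appended).
	// With neither set, an OS default such as $HOME/.cache/helm/repository applies.
}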
+func (l lazypath) dataPath(elem ...string) string { + return l.path(DataHomeEnvVar, xdg.DataHomeEnvVar, dataHome, filepath.Join(elem...)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go new file mode 100644 index 000000000000..eba6dde151d6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go @@ -0,0 +1,34 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin + +package helmpath + +import ( + "path/filepath" + + "k8s.io/client-go/util/homedir" +) + +func dataHome() string { + return filepath.Join(homedir.HomeDir(), "Library") +} + +func configHome() string { + return filepath.Join(homedir.HomeDir(), "Library", "Preferences") +} + +func cacheHome() string { + return filepath.Join(homedir.HomeDir(), "Library", "Caches") +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go new file mode 100644 index 000000000000..82fb4b6f1fec --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go @@ -0,0 +1,45 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !darwin + +package helmpath + +import ( + "path/filepath" + + "k8s.io/client-go/util/homedir" +) + +// dataHome defines the base directory relative to which user specific data files should be stored. +// +// If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share is used. +func dataHome() string { + return filepath.Join(homedir.HomeDir(), ".local", "share") +} + +// configHome defines the base directory relative to which user specific configuration files should +// be stored. +// +// If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config is used. +func configHome() string { + return filepath.Join(homedir.HomeDir(), ".config") +} + +// cacheHome defines the base directory relative to which user specific non-essential data files +// should be stored. +// +// If $XDG_CACHE_HOME is either not set or empty, a default equal to $HOME/.cache is used. 
+func cacheHome() string { + return filepath.Join(homedir.HomeDir(), ".cache") +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go new file mode 100644 index 000000000000..230aee2a933c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go @@ -0,0 +1,24 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package helmpath + +import "os" + +func dataHome() string { return configHome() } + +func configHome() string { return os.Getenv("APPDATA") } + +func cacheHome() string { return os.Getenv("TEMP") } diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go new file mode 100644 index 000000000000..eaa3e6864347 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go @@ -0,0 +1,34 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package xdg holds constants pertaining to XDG Base Directory Specification. +// +// The XDG Base Directory Specification https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html +// specifies the environment variables that define user-specific base directories for various categories of files. +package xdg + +const ( + // CacheHomeEnvVar is the environment variable used by the + // XDG base directory specification for the cache directory. + CacheHomeEnvVar = "XDG_CACHE_HOME" + + // ConfigHomeEnvVar is the environment variable used by the + // XDG base directory specification for the config directory. + ConfigHomeEnvVar = "XDG_CONFIG_HOME" + + // DataHomeEnvVar is the environment variable used by the + // XDG base directory specification for the data directory. + DataHomeEnvVar = "XDG_DATA_HOME" +) diff --git a/vendor/helm.sh/helm/v3/pkg/ignore/doc.go b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go new file mode 100644 index 000000000000..1f5e91847714 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go @@ -0,0 +1,68 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ignore provides tools for writing ignore files (a la .gitignore). + +This provides both an ignore parser and a file-aware processor. + +The format of ignore files closely follows, but does not exactly match, the +format for .gitignore files (https://git-scm.com/docs/gitignore). + +The formatting rules are as follows: + + - Parsing is line-by-line + - Empty lines are ignored + - Lines that begin with # (comments) will be ignored + - Leading and trailing spaces are always ignored + - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) + - There is no support for multi-line patterns + - Shell glob patterns are supported. See Go's "path/filepath".Match + - If a pattern begins with a leading !, the match will be negated. + - If a pattern begins with a leading /, only paths relatively rooted will match. + - If the pattern ends with a trailing /, only directories will match + - If a pattern contains no slashes, file basenames are tested (not paths) + - The pattern sequence "**", while legal in a glob, will cause an error here + (to indicate incompatibility with .gitignore). + +Example: + + # Match any file named foo.txt + foo.txt + + # Match any text file + *.txt + + # Match only directories named mydir + mydir/ + + # Match only text files in the top-level directory + /*.txt + + # Match only the file foo.txt in the top-level directory + /foo.txt + + # Match any file named ab.txt, ac.txt, or ad.txt + a[b-d].txt + +Notable differences from .gitignore: + - The '**' syntax is not supported. + - The globbing library is Go's 'filepath.Match', not fnmatch(3) + - Trailing spaces are always ignored (there is no supported escape sequence) + - The evaluation of escape sequences has not been tested for compatibility + - There is no support for '\!' as a special leading sequence. +*/ +package ignore // import "helm.sh/helm/v3/pkg/ignore" diff --git a/vendor/helm.sh/helm/v3/pkg/ignore/rules.go b/vendor/helm.sh/helm/v3/pkg/ignore/rules.go new file mode 100644 index 000000000000..88de407ad88a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/ignore/rules.go @@ -0,0 +1,228 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ignore + +import ( + "bufio" + "bytes" + "io" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + +// HelmIgnore default name of an ignorefile. +const HelmIgnore = ".helmignore" + +// Rules is a collection of path matching rules. +// +// Parse() and ParseFile() will construct and populate new Rules. +// Empty() will create an immutable empty ruleset. +type Rules struct { + patterns []*pattern +} + +// Empty builds an empty ruleset. +func Empty() *Rules { + return &Rules{patterns: []*pattern{}} +} + +// AddDefaults adds default ignore patterns. 
+// +// Ignore all dotfiles in "templates/" +func (r *Rules) AddDefaults() { + r.parseRule(`templates/.?*`) +} + +// ParseFile parses a helmignore file and returns the *Rules. +func ParseFile(file string) (*Rules, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + return Parse(f) +} + +// Parse parses a rules file +func Parse(file io.Reader) (*Rules, error) { + r := &Rules{patterns: []*pattern{}} + + s := bufio.NewScanner(file) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for s.Scan() { + scannedBytes := s.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + line := string(scannedBytes) + currentLine++ + + if err := r.parseRule(line); err != nil { + return r, err + } + } + return r, s.Err() +} + +// Ignore evaluates the file at the given path, and returns true if it should be ignored. +// +// Ignore evaluates path against the rules in order. Evaluation stops when a match +// is found. Matching a negative rule will stop evaluation. +func (r *Rules) Ignore(path string, fi os.FileInfo) bool { + // Don't match on empty dirs. + if path == "" { + return false + } + + // Disallow ignoring the current working directory. + // See issue: + // 1776 (New York City) Hamilton: "Pardon me, are you Aaron Burr, sir?" + if path == "." || path == "./" { + return false + } + for _, p := range r.patterns { + if p.match == nil { + log.Printf("ignore: no matcher supplied for %q", p.raw) + return false + } + + // For negative rules, we need to capture and return non-matches, + // and continue for matches. + if p.negate { + if p.mustDir && !fi.IsDir() { + return true + } + if !p.match(path, fi) { + return true + } + continue + } + + // If the rule is looking for directories, and this is not a directory, + // skip it. + if p.mustDir && !fi.IsDir() { + continue + } + if p.match(path, fi) { + return true + } + } + return false +} + +// parseRule parses a rule string and creates a pattern, which is then stored in the Rules object. +func (r *Rules) parseRule(rule string) error { + rule = strings.TrimSpace(rule) + + // Ignore blank lines + if rule == "" { + return nil + } + // Comment + if strings.HasPrefix(rule, "#") { + return nil + } + + // Fail any rules that contain ** + if strings.Contains(rule, "**") { + return errors.New("double-star (**) syntax is not supported") + } + + // Fail any patterns that can't compile. A non-empty string must be + // given to Match() to avoid optimization that skips rule evaluation. + if _, err := filepath.Match(rule, "abc"); err != nil { + return err + } + + p := &pattern{raw: rule} + + // Negation is handled at a higher level, so strip the leading ! from the + // string. + if strings.HasPrefix(rule, "!") { + p.negate = true + rule = rule[1:] + } + + // Directory verification is handled by a higher level, so the trailing / + // is removed from the rule. That way, a directory named "foo" matches, + // even if the supplied string does not contain a literal slash character. + if strings.HasSuffix(rule, "/") { + p.mustDir = true + rule = strings.TrimSuffix(rule, "/") + } + + if strings.HasPrefix(rule, "/") { + // Require path matches the root path. + p.match = func(n string, _ os.FileInfo) bool { + rule = strings.TrimPrefix(rule, "/") + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else if strings.Contains(rule, "/") { + // require structural match. 
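+		// The pattern contains a slash but is not root-anchored, so it is
+		// matched against the path exactly as supplied (no basename shortcut).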
+ p.match = func(n string, _ os.FileInfo) bool { + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else { + p.match = func(n string, _ os.FileInfo) bool { + // When there is no slash in the pattern, we evaluate ONLY the + // filename. + n = filepath.Base(n) + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } + + r.patterns = append(r.patterns, p) + return nil +} + +// matcher is a function capable of computing a match. +// +// It returns true if the rule matches. +type matcher func(name string, fi os.FileInfo) bool + +// pattern describes a pattern to be matched in a rule set. +type pattern struct { + // raw is the unparsed string, with nothing stripped. + raw string + // match is the matcher function. + match matcher + // negate indicates that the rule's outcome should be negated. + negate bool + // mustDir indicates that the matched file must be a directory. + mustDir bool +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go new file mode 100644 index 000000000000..6e71421199ea --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -0,0 +1,933 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + + multierror "github.com/hashicorp/go-multierror" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + cachetools "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +// ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. 
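+//
+// Callers can detect it with errors.Is; an illustrative sketch:
+//
+//	if err := client.WatchUntilReady(resources, time.Minute); errors.Is(err, ErrNoObjectsVisited) {
+//		// the supplied resource list was empty
+//	}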
+var ErrNoObjectsVisited = errors.New("no objects visited") + +var metadataAccessor = meta.NewAccessor() + +// ManagedFieldsManager is the name of the manager of Kubernetes managedFields +// first introduced in Kubernetes 1.18 +var ManagedFieldsManager string + +// Client represents a client capable of communicating with the Kubernetes API. +type Client struct { + // Factory provides a minimal version of the kubectl Factory interface. If + // you need the full Factory you can type switch to the full interface. + // Since Kubernetes Go API does not provide backwards compatibility across + // minor versions, this API does not follow Helm backwards compatibility. + // Helm is exposing Kubernetes in this property and cannot guarantee this + // will not change. The minimal interface only has the functions that Helm + // needs. The smaller surface area of the interface means there is a lower + // chance of it changing. + Factory Factory + Log func(string, ...interface{}) + // Namespace allows to bypass the kubeconfig file for the choice of the namespace + Namespace string + + kubeClient kubernetes.Interface +} + +func init() { + // Add CRDs to the scheme. They are missing by default. + if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { + // This should never happen. + panic(err) + } + if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } +} + +// New creates a new Client. +func New(getter genericclioptions.RESTClientGetter) *Client { + if getter == nil { + getter = genericclioptions.NewConfigFlags(true) + } + return &Client{ + Factory: cmdutil.NewFactory(getter), + Log: nopLogger, + } +} + +var nopLogger = func(_ string, _ ...interface{}) {} + +// getKubeClient get or create a new KubernetesClientSet +func (c *Client) getKubeClient() (kubernetes.Interface, error) { + var err error + if c.kubeClient == nil { + c.kubeClient, err = c.Factory.KubernetesClientSet() + } + + return c.kubeClient, err +} + +// IsReachable tests connectivity to the cluster. +func (c *Client) IsReachable() error { + client, err := c.getKubeClient() + if err == genericclioptions.ErrEmptyConfig { + // re-replace kubernetes ErrEmptyConfig error with a friendly error + // moar workarounds for Kubernetes API breaking. + return errors.New("Kubernetes cluster unreachable") + } + if err != nil { + return errors.Wrap(err, "Kubernetes cluster unreachable") + } + if _, err := client.Discovery().ServerVersion(); err != nil { + return errors.Wrap(err, "Kubernetes cluster unreachable") + } + return nil +} + +// Create creates Kubernetes resources specified in the resource list. +func (c *Client) Create(resources ResourceList) (*Result, error) { + c.Log("creating %d resource(s)", len(resources)) + if err := perform(resources, createResource); err != nil { + return nil, err + } + return &Result{Created: resources}, nil +} + +func transformRequests(req *rest.Request) { + tableParam := strings.Join([]string{ + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName), + "application/json", + }, ",") + req.SetHeader("Accept", tableParam) + + // if sorting, ensure we receive the full object in order to introspect its fields via jsonpath + req.Param("includeObject", "Object") +} + +// Get retrieves the resource objects supplied. If related is set to true the +// related pods are fetched as well. 
If the passed in resources are a table kind +// the related resources will also be fetched as kind=table. +func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime.Object, error) { + buf := new(bytes.Buffer) + objs := make(map[string][]runtime.Object) + + podSelectors := []map[string]string{} + err := resources.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + gvk := info.ResourceMapping().GroupVersionKind + vk := gvk.Version + "/" + gvk.Kind + obj, err := getResource(info) + if err != nil { + fmt.Fprintf(buf, "Get resource %s failed, err:%v\n", info.Name, err) + } else { + objs[vk] = append(objs[vk], obj) + + // Only fetch related pods if they are requested + if related { + // Discover if the existing object is a table. If it is, request + // the pods as Tables. Otherwise request them normally. + objGVK := obj.GetObjectKind().GroupVersionKind() + var isTable bool + if objGVK.Kind == "Table" { + isTable = true + } + + objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors) + if err != nil { + c.Log("Warning: get the relation pod is failed, err:%s", err.Error()) + } + } + } + + return nil + }) + if err != nil { + return nil, err + } + + return objs, nil +} + +func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]runtime.Object, table bool, podSelectors *[]map[string]string) (map[string][]runtime.Object, error) { + if info == nil { + return objs, nil + } + c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name) + selector, ok, _ := getSelectorFromObject(info.Object) + if !ok { + return objs, nil + } + + for index := range *podSelectors { + if reflect.DeepEqual((*podSelectors)[index], selector) { + // check if pods for selectors are already added. This avoids duplicate printing of pods + return objs, nil + } + } + + *podSelectors = append(*podSelectors, selector) + + var infos []*resource.Info + var err error + if table { + infos, err = c.Factory.NewBuilder(). + Unstructured(). + ContinueOnError(). + NamespaceParam(info.Namespace). + DefaultNamespace(). + ResourceTypes("pods"). + LabelSelector(labels.Set(selector).AsSelector().String()). + TransformRequests(transformRequests). + Do().Infos() + if err != nil { + return objs, err + } + } else { + infos, err = c.Factory.NewBuilder(). + Unstructured(). + ContinueOnError(). + NamespaceParam(info.Namespace). + DefaultNamespace(). + ResourceTypes("pods"). + LabelSelector(labels.Set(selector).AsSelector().String()). + Do().Infos() + if err != nil { + return objs, err + } + } + vk := "v1/Pod(related)" + + for _, info := range infos { + objs[vk] = append(objs[vk], info.Object) + } + return objs, nil +} + +func getSelectorFromObject(obj runtime.Object) (map[string]string, bool, error) { + typed := obj.(*unstructured.Unstructured) + kind := typed.Object["kind"] + switch kind { + case "ReplicaSet", "Deployment", "StatefulSet", "DaemonSet", "Job": + return unstructured.NestedStringMap(typed.Object, "spec", "selector", "matchLabels") + case "ReplicationController": + return unstructured.NestedStringMap(typed.Object, "spec", "selector") + default: + return nil, false, nil + } +} + +func getResource(info *resource.Info) (runtime.Object, error) { + obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name) + if err != nil { + return nil, err + } + return obj, nil +} + +// Wait waits up to the given timeout for the specified resources to be ready. 
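+//
+// A minimal sketch of a call site (manifest is any io.Reader holding the
+// rendered YAML; hypothetical):
+//
+//	resources, err := client.Build(manifest, false)
+//	if err == nil {
+//		err = client.Wait(resources, 5*time.Minute)
+//	}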
+func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { + cs, err := c.getKubeClient() + if err != nil { + return err + } + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) + w := waiter{ + c: checker, + log: c.Log, + timeout: timeout, + } + return w.waitForResources(resources) +} + +// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. +func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error { + cs, err := c.getKubeClient() + if err != nil { + return err + } + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) + w := waiter{ + c: checker, + log: c.Log, + timeout: timeout, + } + return w.waitForResources(resources) +} + +// WaitForDelete wait up to the given timeout for the specified resources to be deleted. +func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error { + w := waiter{ + log: c.Log, + timeout: timeout, + } + return w.waitForDeletedResources(resources) +} + +func (c *Client) namespace() string { + if c.Namespace != "" { + return c.Namespace + } + if ns, _, err := c.Factory.ToRawKubeConfigLoader().Namespace(); err == nil { + return ns + } + return v1.NamespaceDefault +} + +// newBuilder returns a new resource builder for structured api objects. +func (c *Client) newBuilder() *resource.Builder { + return c.Factory.NewBuilder(). + ContinueOnError(). + NamespaceParam(c.namespace()). + DefaultNamespace(). + Flatten() +} + +// Build validates for Kubernetes objects and returns unstructured infos. +func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) { + validationDirective := metav1.FieldValidationIgnore + if validate { + validationDirective = metav1.FieldValidationStrict + } + + schema, err := c.Factory.Validator(validationDirective) + if err != nil { + return nil, err + } + result, err := c.newBuilder(). + Unstructured(). + Schema(schema). + Stream(reader, ""). + Do().Infos() + return result, scrubValidationError(err) +} + +// BuildTable validates for Kubernetes objects and returns unstructured infos. +// The returned kind is a Table. +func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) { + validationDirective := metav1.FieldValidationIgnore + if validate { + validationDirective = metav1.FieldValidationStrict + } + + schema, err := c.Factory.Validator(validationDirective) + if err != nil { + return nil, err + } + result, err := c.newBuilder(). + Unstructured(). + Schema(schema). + Stream(reader, ""). + TransformRequests(transformRequests). + Do().Infos() + return result, scrubValidationError(err) +} + +func (c *Client) update(original, target ResourceList, force, threeWayMerge bool) (*Result, error) { + updateErrors := []string{} + res := &Result{} + + c.Log("checking %d resources for changes", len(target)) + err := target.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()) + if _, err := helper.Get(info.Namespace, info.Name); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "could not get information about the resource") + } + + // Append the created resource to the results, even if something fails + res.Created = append(res.Created, info) + + // Since the resource does not exist, create it. 
+ if err := createResource(info); err != nil { + return errors.Wrap(err, "failed to create resource") + } + + kind := info.Mapping.GroupVersionKind.Kind + c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace) + return nil + } + + originalInfo := original.Get(info) + if originalInfo == nil { + kind := info.Mapping.GroupVersionKind.Kind + return errors.Errorf("no %s with the name %q found", kind, info.Name) + } + + if err := updateResource(c, info, originalInfo.Object, force, threeWayMerge); err != nil { + c.Log("error updating the resource %q:\n\t %v", info.Name, err) + updateErrors = append(updateErrors, err.Error()) + } + // Because we check for errors later, append the info regardless + res.Updated = append(res.Updated, info) + + return nil + }) + + switch { + case err != nil: + return res, err + case len(updateErrors) != 0: + return res, errors.New(strings.Join(updateErrors, " && ")) + } + + for _, info := range original.Difference(target) { + c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace) + + if err := info.Get(); err != nil { + c.Log("Unable to get obj %q, err: %s", info.Name, err) + continue + } + annotations, err := metadataAccessor.Annotations(info.Object) + if err != nil { + c.Log("Unable to get annotations on %q, err: %s", info.Name, err) + } + if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy { + c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy) + continue + } + if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { + c.Log("Failed to delete %q, err: %s", info.ObjectName(), err) + continue + } + res.Deleted = append(res.Deleted, info) + } + return res, nil +} + +// Update takes the current list of objects and target list of objects and +// creates resources that don't already exist, updates resources that have been +// modified in the target configuration, and deletes resources from the current +// configuration that are not present in the target configuration. If an error +// occurs, a Result will still be returned with the error, containing all +// resource updates, creations, and deletions that were attempted. These can be +// used for cleanup or other logging purposes. +// +// The difference to Update is that UpdateThreeWayMerge does a three-way-merge +// for unstructured objects. +func (c *Client) UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error) { + return c.update(original, target, force, true) +} + +// Update takes the current list of objects and target list of objects and +// creates resources that don't already exist, updates resources that have been +// modified in the target configuration, and deletes resources from the current +// configuration that are not present in the target configuration. If an error +// occurs, a Result will still be returned with the error, containing all +// resource updates, creations, and deletions that were attempted. These can be +// used for cleanup or other logging purposes. +func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) { + return c.update(original, target, force, false) +} + +// Delete deletes Kubernetes resources specified in the resources list with +// background cascade deletion. It will attempt to delete all resources even +// if one or more fail and collect any errors. All successfully deleted items +// will be returned in the `Deleted` ResourceList that is part of the result. 
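+//
+// Because deletion continues past individual failures, callers should inspect
+// each entry in the returned error slice; an illustrative sketch:
+//
+//	_, errs := client.Delete(resources)
+//	for _, err := range errs {
+//		log.Printf("delete failed: %v", err)
+//	}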
+func (c *Client) Delete(resources ResourceList) (*Result, []error) { + return rdelete(c, resources, metav1.DeletePropagationBackground) +} + +// Delete deletes Kubernetes resources specified in the resources list with +// given deletion propagation policy. It will attempt to delete all resources even +// if one or more fail and collect any errors. All successfully deleted items +// will be returned in the `Deleted` ResourceList that is part of the result. +func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) { + return rdelete(c, resources, policy) +} + +func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { + var errs []error + res := &Result{} + mtx := sync.Mutex{} + err := perform(resources, func(info *resource.Info) error { + c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind) + err := deleteResource(info, propagation) + if err == nil || apierrors.IsNotFound(err) { + if err != nil { + c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err) + } + mtx.Lock() + defer mtx.Unlock() + res.Deleted = append(res.Deleted, info) + return nil + } + mtx.Lock() + defer mtx.Unlock() + // Collect the error and continue on + errs = append(errs, err) + return nil + }) + if err != nil { + if errors.Is(err, ErrNoObjectsVisited) { + err = fmt.Errorf("object not found, skipping delete: %w", err) + } + errs = append(errs, err) + } + if errs != nil { + return nil, errs + } + return res, nil +} + +func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error { + return func(info *resource.Info) error { + return c.watchUntilReady(t, info) + } +} + +// WatchUntilReady watches the resources given and waits until it is ready. +// +// This method is mainly for hook implementations. It watches for a resource to +// hit a particular milestone. The milestone depends on the Kind. +// +// For most kinds, it checks to see if the resource is marked as Added or Modified +// by the Kubernetes event stream. For some kinds, it does more: +// +// - Jobs: A job is marked "Ready" when it has successfully completed. This is +// ascertained by watching the Status fields in a job's output. +// - Pods: A pod is marked "Ready" when it has successfully completed. This is +// ascertained by watching the status.phase field in a pod's output. +// +// Handling for other kinds will be added as necessary. +func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error { + // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): + // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 + return perform(resources, c.watchTimeout(timeout)) +} + +func perform(infos ResourceList, fn func(*resource.Info) error) error { + var result error + + if len(infos) == 0 { + return ErrNoObjectsVisited + } + + errs := make(chan error) + go batchPerform(infos, fn, errs) + + for range infos { + err := <-errs + if err != nil { + result = multierror.Append(result, err) + } + } + + return result +} + +// getManagedFieldsManager returns the manager string. If one was set it will be returned. +// Otherwise, one is calculated based on the name of the binary. 
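+//
+// For example, a client embedded in a binary invoked as /usr/local/bin/myctl
+// reports the manager name "myctl" unless ManagedFieldsManager is set.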
+func getManagedFieldsManager() string { + + // When a manager is explicitly set use it + if ManagedFieldsManager != "" { + return ManagedFieldsManager + } + + // When no manager is set and no calling application can be found it is unknown + if len(os.Args[0]) == 0 { + return "unknown" + } + + // When there is an application that can be determined and no set manager + // use the base name. This is one of the ways Kubernetes libs handle figuring + // names out. + return filepath.Base(os.Args[0]) +} + +func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) { + var kind string + var wg sync.WaitGroup + for _, info := range infos { + currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind + if kind != currentKind { + wg.Wait() + kind = currentKind + } + wg.Add(1) + go func(i *resource.Info) { + errs <- fn(i) + wg.Done() + }(info) + } +} + +func createResource(info *resource.Info) error { + return retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object) + if err != nil { + return err + } + return info.Refresh(obj, true) + }) +} + +func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) error { + return retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + opts := &metav1.DeleteOptions{PropagationPolicy: &policy} + _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts) + return err + }) +} + +func createPatch(target *resource.Info, current runtime.Object, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) { + oldData, err := json.Marshal(current) + if err != nil { + return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing current configuration") + } + newData, err := json.Marshal(target.Object) + if err != nil { + return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing target configuration") + } + + // Fetch the current object for the three way merge + helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) + currentObj, err := helper.Get(target.Namespace, target.Name) + if err != nil && !apierrors.IsNotFound(err) { + return nil, types.StrategicMergePatchType, errors.Wrapf(err, "unable to get data for current object %s/%s", target.Namespace, target.Name) + } + + // Even if currentObj is nil (because it was not found), it will marshal just fine + currentData, err := json.Marshal(currentObj) + if err != nil { + return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing live configuration") + } + + // Get a versioned object + versionedObject := AsVersioned(target) + + // Unstructured objects, such as CRDs, may not have a not registered error + // returned from ConvertToVersion. Anything that's unstructured should + // use generic JSON merge patch. Strategic Merge Patch is not supported + // on objects like CRDs. 
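+	// (When threeWayMergeForUnstructured is set, the JSON merge patch below is
+	// built three-way and refuses changes to apiVersion, kind, or metadata.name.)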
+ _, isUnstructured := versionedObject.(runtime.Unstructured) + + // On newer K8s versions, CRDs aren't unstructured but has this dedicated type + _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) + + if isUnstructured || isCRD { + if threeWayMergeForUnstructured { + // from https://github.com/kubernetes/kubectl/blob/b83b2ec7d15f286720bccf7872b5c72372cb8e80/pkg/cmd/apply/patcher.go#L129 + preconditions := []mergepatch.PreconditionFunc{ + mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), + mergepatch.RequireMetadataKeyUnchanged("name"), + } + patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(oldData, newData, currentData, preconditions...) + if err != nil && mergepatch.IsPreconditionFailed(err) { + err = fmt.Errorf("%w: at least one field was changed: apiVersion, kind or name", err) + } + return patch, types.MergePatchType, err + } + // fall back to generic JSON merge patch + patch, err := jsonpatch.CreateMergePatch(oldData, newData) + return patch, types.MergePatchType, err + } + + patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject) + if err != nil { + return nil, types.StrategicMergePatchType, errors.Wrap(err, "unable to create patch metadata from object") + } + + patch, err := strategicpatch.CreateThreeWayMergePatch(oldData, newData, currentData, patchMeta, true) + return patch, types.StrategicMergePatchType, err +} + +func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force, threeWayMergeForUnstructured bool) error { + var ( + obj runtime.Object + helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) + kind = target.Mapping.GroupVersionKind.Kind + ) + + // if --force is applied, attempt to replace the existing resource with the new object. + if force { + var err error + obj, err = helper.Replace(target.Namespace, target.Name, true, target.Object) + if err != nil { + return errors.Wrap(err, "failed to replace object") + } + c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind) + } else { + patch, patchType, err := createPatch(target, currentObj, threeWayMergeForUnstructured) + if err != nil { + return errors.Wrap(err, "failed to create patch") + } + + if patch == nil || string(patch) == "{}" { + c.Log("Looks like there are no changes for %s %q", kind, target.Name) + // This needs to happen to make sure that Helm has the latest info from the API + // Otherwise there will be no labels and other functions that use labels will panic + if err := target.Get(); err != nil { + return errors.Wrap(err, "failed to refresh resource information") + } + return nil + } + // send patch to server + c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace) + obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil) + if err != nil { + return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind) + } + } + + target.Refresh(obj, true) + return nil +} + +func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { + kind := info.Mapping.GroupVersionKind.Kind + switch kind { + case "Job", "Pod": + default: + return nil + } + + c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout) + + // Use a selector on the name of the resource. 
This should be unique for the + // given version and kind + selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) + if err != nil { + return err + } + lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) + + // What we watch for depends on the Kind. + // - For a Job, we watch for completion. + // - For all else, we watch until Ready. + // In the future, we might want to add some special logic for types + // like Ingress, Volume, etc. + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) + defer cancel() + _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { + // Make sure the incoming object is versioned as we use unstructured + // objects when we build manifests + obj := convertWithMapper(e.Object, info.Mapping) + switch e.Type { + case watch.Added, watch.Modified: + // For things like a secret or a config map, this is the best indicator + // we get. We care mostly about jobs, where what we want to see is + // the status go into a good state. For other types, like ReplicaSet + // we don't really do anything to support these as hooks. + c.Log("Add/Modify event for %s: %v", info.Name, e.Type) + switch kind { + case "Job": + return c.waitForJob(obj, info.Name) + case "Pod": + return c.waitForPodSuccess(obj, info.Name) + } + return true, nil + case watch.Deleted: + c.Log("Deleted event for %s", info.Name) + return true, nil + case watch.Error: + // Handle error and return with an error. + c.Log("Error event for %s", info.Name) + return true, errors.Errorf("failed to deploy %s", info.Name) + default: + return false, nil + } + }) + return err +} + +// waitForJob is a helper that waits for a job to complete. +// +// This operates on an event returned from a watcher. +func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*batch.Job) + if !ok { + return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) + } + + for _, c := range o.Status.Conditions { + if c.Type == batch.JobComplete && c.Status == "True" { + return true, nil + } else if c.Type == batch.JobFailed && c.Status == "True" { + return true, errors.Errorf("job %s failed: %s", name, c.Reason) + } + } + + c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + return false, nil +} + +// waitForPodSuccess is a helper that waits for a pod to complete. +// +// This operates on an event returned from a watcher. 
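+//
+// Only the PodSucceeded and PodFailed phases end the watch; PodPending and
+// PodRunning are merely logged and the watch continues.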
+func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
+	o, ok := obj.(*v1.Pod)
+	if !ok {
+		return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
+	}
+
+	switch o.Status.Phase {
+	case v1.PodSucceeded:
+		c.Log("Pod %s succeeded", o.Name)
+		return true, nil
+	case v1.PodFailed:
+		return true, errors.Errorf("pod %s failed", o.Name)
+	case v1.PodPending:
+		c.Log("Pod %s pending", o.Name)
+	case v1.PodRunning:
+		c.Log("Pod %s running", o.Name)
+	}
+
+	return false, nil
+}
+
+// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions
+func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) {
+	podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err)
+	}
+	return podList, nil
+}
+
+// OutputContainerLogsForPodList is a helper that outputs logs for a list of pods
+func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error {
+	for _, pod := range podList.Items {
+		for _, container := range pod.Spec.Containers {
+			options := &v1.PodLogOptions{
+				Container: container.Name,
+			}
+			request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options)
+			err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name))
+			if err2 != nil {
+				return err2
+			}
+		}
+	}
+	return nil
+}
+
+func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error {
+	readCloser, err := request.Stream(context.Background())
+	if err != nil {
+		return errors.Errorf("Failed to stream pod logs for pod: %s, container: %s", podName, containerName)
+	}
+	defer readCloser.Close()
+	_, err = io.Copy(writer, readCloser)
+	if err != nil {
+		return errors.Errorf("Failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
+	}
+	return nil
+}
+
+// scrubValidationError removes kubectl info from the message.
+func scrubValidationError(err error) error {
+	if err == nil {
+		return nil
+	}
+	const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false"
+
+	if strings.Contains(err.Error(), stopValidateMessage) {
+		return errors.New(strings.ReplaceAll(err.Error(), "; "+stopValidateMessage, ""))
+	}
+	return err
+}
+
+// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
+// and returns said phase (PodSucceeded or PodFailed qualify).
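+//
+// An illustrative call (hypothetical pod name):
+//
+//	phase, err := client.WaitAndGetCompletedPodPhase("hook-pod", 5*time.Minute)
+//
+// Note that the duration is handed to the watch as TimeoutSeconds without a
+// unit conversion, so the effective server-side timeout is far longer than
+// the nominal value.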
+func (c *Client) WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) { + client, err := c.getKubeClient() + if err != nil { + return v1.PodUnknown, err + } + to := int64(timeout) + watcher, err := client.CoreV1().Pods(c.namespace()).Watch(context.Background(), metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", name), + TimeoutSeconds: &to, + }) + if err != nil { + return v1.PodUnknown, err + } + + for event := range watcher.ResultChan() { + p, ok := event.Object.(*v1.Pod) + if !ok { + return v1.PodUnknown, fmt.Errorf("%s not a pod", name) + } + switch p.Status.Phase { + case v1.PodFailed: + return v1.PodFailed, nil + case v1.PodSucceeded: + return v1.PodSucceeded, nil + } + } + + return v1.PodUnknown, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/config.go b/vendor/helm.sh/helm/v3/pkg/kube/config.go new file mode 100644 index 000000000000..e00c9acb1584 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/config.go @@ -0,0 +1,30 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import "k8s.io/cli-runtime/pkg/genericclioptions" + +// GetConfig returns a Kubernetes client config. +// +// Deprecated +func GetConfig(kubeconfig, context, namespace string) *genericclioptions.ConfigFlags { + cf := genericclioptions.NewConfigFlags(true) + cf.Namespace = &namespace + cf.Context = &context + cf.KubeConfig = &kubeconfig + return cf +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/converter.go b/vendor/helm.sh/helm/v3/pkg/kube/converter.go new file mode 100644 index 000000000000..3bf0e358c386 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/converter.go @@ -0,0 +1,69 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "sync" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes/scheme" +) + +var k8sNativeScheme *runtime.Scheme +var k8sNativeSchemeOnce sync.Once + +// AsVersioned converts the given info into a runtime.Object with the correct +// group and version set +func AsVersioned(info *resource.Info) runtime.Object { + return convertWithMapper(info.Object, info.Mapping) +} + +// convertWithMapper converts the given object with the optional provided +// RESTMapping. If no mapping is provided, the default schema versioner is used +func convertWithMapper(obj runtime.Object, mapping *meta.RESTMapping) runtime.Object { + s := kubernetesNativeScheme() + var gv = runtime.GroupVersioner(schema.GroupVersions(s.PrioritizedVersionsAllGroups())) + if mapping != nil { + gv = mapping.GroupVersionKind.GroupVersion() + } + if obj, err := runtime.ObjectConvertor(s).ConvertToVersion(obj, gv); err == nil { + return obj + } + return obj +} + +// kubernetesNativeScheme returns a clean *runtime.Scheme with _only_ Kubernetes +// native resources added to it. This is required to break free of custom resources +// that may have been added to scheme.Scheme due to Helm being used as a package in +// combination with e.g. a versioned kube client. If we would not do this, the client +// may attempt to perform e.g. a 3-way-merge strategy patch for custom resources. +func kubernetesNativeScheme() *runtime.Scheme { + k8sNativeSchemeOnce.Do(func() { + k8sNativeScheme = runtime.NewScheme() + scheme.AddToScheme(k8sNativeScheme) + // API extensions are not in the above scheme set, + // and must thus be added separately. + apiextensionsv1beta1.AddToScheme(k8sNativeScheme) + apiextensionsv1.AddToScheme(k8sNativeScheme) + }) + return k8sNativeScheme +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/factory.go b/vendor/helm.sh/helm/v3/pkg/kube/factory.go new file mode 100644 index 000000000000..f19d62dc3c6a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/factory.go @@ -0,0 +1,51 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/validation" +) + +// Factory provides abstractions that allow the Kubectl command to be extended across multiple types +// of resources and different API sets. +// This interface is a minimal copy of the kubectl Factory interface containing only the functions +// needed by Helm. 
Since Kubernetes Go APIs, including interfaces, can change in any minor release +// this interface is not covered by the Helm backwards compatibility guarantee. The reasons for the +// minimal copy is that it does not include the full interface. Changes or additions to functions +// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes +// being exposed. +type Factory interface { + // ToRawKubeConfigLoader return kubeconfig loader as-is + ToRawKubeConfigLoader() clientcmd.ClientConfig + + // DynamicClient returns a dynamic client ready for use + DynamicClient() (dynamic.Interface, error) + + // KubernetesClientSet gives you back an external clientset + KubernetesClientSet() (*kubernetes.Clientset, error) + + // NewBuilder returns an object that assists in loading objects from both disk and the server + // and which implements the common patterns for CLI interactions with generic resources. + NewBuilder() *resource.Builder + + // Returns a schema that can validate objects stored on disk. + Validator(validationDirective string) (validation.Schema, error) +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go new file mode 100644 index 000000000000..852a3e015dfa --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go @@ -0,0 +1,172 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fake implements various fake KubeClients for use in testing +package fake + +import ( + "io" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/kube" +) + +// FailingKubeClient implements KubeClient for testing purposes. It also has +// additional errors you can set to fail different functions, otherwise it +// delegates all its calls to `PrintingKubeClient` +type FailingKubeClient struct { + PrintingKubeClient + CreateError error + GetError error + WaitError error + DeleteError error + DeleteWithPropagationError error + WatchUntilReadyError error + UpdateError error + BuildError error + BuildTableError error + BuildDummy bool + DummyResources kube.ResourceList + BuildUnstructuredError error + WaitAndGetCompletedPodPhaseError error + WaitDuration time.Duration +} + +// Create returns the configured error if set or prints +func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { + if f.CreateError != nil { + return nil, f.CreateError + } + return f.PrintingKubeClient.Create(resources) +} + +// Get returns the configured error if set or prints +func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[string][]runtime.Object, error) { + if f.GetError != nil { + return nil, f.GetError + } + return f.PrintingKubeClient.Get(resources, related) +} + +// Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints. 
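+//
+// A sketch of simulating a slow, failing wait in a test (hypothetical values):
+//
+//	fc := &fake.FailingKubeClient{
+//		WaitError:    errors.New("boom"),
+//		WaitDuration: 10 * time.Millisecond,
+//	}
+//	err := fc.Wait(nil, time.Minute) // sleeps, then returns "boom"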
+func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error { + time.Sleep(f.WaitDuration) + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.Wait(resources, d) +} + +// WaitWithJobs returns the configured error if set or prints +func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.WaitWithJobs(resources, d) +} + +// WaitForDelete returns the configured error if set or prints +func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.WaitForDelete(resources, d) +} + +// Delete returns the configured error if set or prints +func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + if f.DeleteError != nil { + return nil, []error{f.DeleteError} + } + return f.PrintingKubeClient.Delete(resources) +} + +// WatchUntilReady returns the configured error if set or prints +func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { + if f.WatchUntilReadyError != nil { + return f.WatchUntilReadyError + } + return f.PrintingKubeClient.WatchUntilReady(resources, d) +} + +// Update returns the configured error if set or prints +func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { + if f.UpdateError != nil { + return &kube.Result{}, f.UpdateError + } + return f.PrintingKubeClient.Update(r, modified, ignoreMe) +} + +// Update returns the configured error if set or prints +func (f *FailingKubeClient) UpdateThreeWayMerge(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { + if f.UpdateError != nil { + return &kube.Result{}, f.UpdateError + } + return f.PrintingKubeClient.Update(r, modified, ignoreMe) +} + +// Build returns the configured error if set or prints +func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) { + if f.BuildError != nil { + return []*resource.Info{}, f.BuildError + } + if f.DummyResources != nil { + return f.DummyResources, nil + } + if f.BuildDummy { + return createDummyResourceList(), nil + } + return f.PrintingKubeClient.Build(r, false) +} + +// BuildTable returns the configured error if set or prints +func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, error) { + if f.BuildTableError != nil { + return []*resource.Info{}, f.BuildTableError + } + return f.PrintingKubeClient.BuildTable(r, false) +} + +// WaitAndGetCompletedPodPhase returns the configured error if set or prints +func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) { + if f.WaitAndGetCompletedPodPhaseError != nil { + return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError + } + return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d) +} + +// DeleteWithPropagationPolicy returns the configured error if set or prints +func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { + if f.DeleteWithPropagationError != nil { + return nil, []error{f.DeleteWithPropagationError} + } + return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) +} + +func createDummyResourceList() kube.ResourceList { + var resInfo resource.Info + resInfo.Name = "dummyName" + resInfo.Namespace = 
"dummyNamespace" + var resourceList kube.ResourceList + resourceList.Append(&resInfo) + return resourceList + +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go new file mode 100644 index 000000000000..95c89e0fd52a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go @@ -0,0 +1,149 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + "io" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/kube" +) + +// PrintingKubeClient implements KubeClient, but simply prints the reader to +// the given output. +type PrintingKubeClient struct { + Out io.Writer + LogOutput io.Writer +} + +// IsReachable checks if the cluster is reachable +func (p *PrintingKubeClient) IsReachable() error { + return nil +} + +// Create prints the values of what would be created with a real KubeClient. +func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, err + } + return &kube.Result{Created: resources}, nil +} + +func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[string][]runtime.Object, error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, err + } + return make(map[string][]runtime.Object), nil +} + +func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// Delete implements KubeClient delete. +// +// It only prints out the content to be deleted. +func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, []error{err} + } + return &kube.Result{Deleted: resources}, nil +} + +// WatchUntilReady implements KubeClient WatchUntilReady. +func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// Update implements KubeClient Update. +func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) { + _, err := io.Copy(p.Out, bufferize(modified)) + if err != nil { + return nil, err + } + // TODO: This doesn't completely mock out have some that get created, + // updated, and deleted. 
I don't think these are used in any unit tests, but + // we may want to refactor a way to handle future tests + return &kube.Result{Updated: modified}, nil +} + +// Build implements KubeClient Build. +func (p *PrintingKubeClient) Build(_ io.Reader, _ bool) (kube.ResourceList, error) { + return []*resource.Info{}, nil +} + +// BuildTable implements KubeClient BuildTable. +func (p *PrintingKubeClient) BuildTable(_ io.Reader, _ bool) (kube.ResourceList, error) { + return []*resource.Info{}, nil +} + +// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase. +func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) { + return v1.PodSucceeded, nil +} + +// GetPodList implements KubeClient GetPodList. +func (p *PrintingKubeClient) GetPodList(_ string, _ metav1.ListOptions) (*v1.PodList, error) { + return &v1.PodList{}, nil +} + +// OutputContainerLogsForPodList implements KubeClient OutputContainerLogsForPodList. +func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNamespace string, _ func(namespace, pod, container string) io.Writer) error { + _, err := io.Copy(p.LogOutput, strings.NewReader(fmt.Sprintf("attempted to output logs for namespace: %s", someNamespace))) + return err +} + +// DeleteWithPropagationPolicy implements KubeClient delete. +// +// It only prints out the content to be deleted. +func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, []error{err} + } + return &kube.Result{Deleted: resources}, nil +} + +func bufferize(resources kube.ResourceList) io.Reader { + var builder strings.Builder + for _, info := range resources { + builder.WriteString(info.String() + "\n") + } + return strings.NewReader(builder.String()) +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/interface.go b/vendor/helm.sh/helm/v3/pkg/kube/interface.go new file mode 100644 index 000000000000..db7591f65031 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/interface.go @@ -0,0 +1,136 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "io" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Interface represents a client capable of communicating with the Kubernetes API. +// +// A KubernetesClient must be concurrency safe. +type Interface interface { + // Create creates one or more resources. + Create(resources ResourceList) (*Result, error) + + // Wait waits up to the given timeout for the specified resources to be ready. + Wait(resources ResourceList, timeout time.Duration) error + + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. + WaitWithJobs(resources ResourceList, timeout time.Duration) error + + // Delete destroys one or more resources. 
+	Delete(resources ResourceList) (*Result, []error)
+
+	// WatchUntilReady watches the given resources and waits until they are ready.
+	//
+	// This method is mainly for hook implementations. It watches for a resource to
+	// hit a particular milestone. The milestone depends on the Kind.
+	//
+	// For Jobs, "ready" means the Job ran to completion (exited without error).
+	// For Pods, "ready" means the Pod phase is marked "succeeded".
+	// For all other kinds, it means the kind was created or modified without
+	// error.
+	WatchUntilReady(resources ResourceList, timeout time.Duration) error
+
+	// Update updates one or more resources, or creates a resource
+	// if it doesn't exist.
+	Update(original, target ResourceList, force bool) (*Result, error)
+
+	// Build creates a resource list from a Reader.
+	//
+	// Reader must contain a YAML stream (one or more YAML documents separated
+	// by "\n---\n").
+	//
+	// Validates against the OpenAPI schema if validate is true.
+	Build(reader io.Reader, validate bool) (ResourceList, error)
+
+	// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
+	// and returns said phase (PodSucceeded or PodFailed qualify).
+	WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error)
+
+	// IsReachable checks whether the client is able to connect to the cluster.
+	IsReachable() error
+}
+
+// InterfaceExt was introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
+type InterfaceExt interface {
+	// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
+	WaitForDelete(resources ResourceList, timeout time.Duration) error
+}
+
+// InterfaceThreeWayMerge was introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceThreeWayMerge and integrate its method(s) into the Interface.
+type InterfaceThreeWayMerge interface {
+	UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error)
+}
+
+// InterfaceLogs was introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceLogs and integrate its method(s) into the Interface.
+type InterfaceLogs interface {
+	// GetPodList lists all pods that match the specified listOptions
+	GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error)
+
+	// OutputContainerLogsForPodList outputs the logs for a pod list
+	OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error
+}
+
+// InterfaceDeletionPropagation was introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceDeletionPropagation and integrate its method(s) into the Interface.
+type InterfaceDeletionPropagation interface {
+	// DeleteWithPropagationPolicy destroys one or more resources. Deletion propagation is handled as per the given propagation policy.
+	DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error)
+}
+
+// InterfaceResources was introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceResources and integrate its method(s) into the Interface.
+type InterfaceResources interface {
+	// Get details of deployed resources.
+ // The first argument is a list of resources to get. The second argument + // specifies if related pods should be fetched. For example, the pods being + // managed by a deployment. + Get(resources ResourceList, related bool) (map[string][]runtime.Object, error) + + // BuildTable creates a resource list from a Reader. This differs from + // Interface.Build() in that a table kind is returned. A table is useful + // if you want to use a printer to display the information. + // + // Reader must contain a YAML stream (one or more YAML documents separated + // by "\n---\n") + // + // Validates against OpenAPI schema if validate is true. + // TODO Helm 4: Integrate into Build with an argument + BuildTable(reader io.Reader, validate bool) (ResourceList, error) +} + +var _ Interface = (*Client)(nil) +var _ InterfaceExt = (*Client)(nil) +var _ InterfaceThreeWayMerge = (*Client)(nil) +var _ InterfaceLogs = (*Client)(nil) +var _ InterfaceDeletionPropagation = (*Client)(nil) +var _ InterfaceResources = (*Client)(nil) diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go new file mode 100644 index 000000000000..55c4a39bf13e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go @@ -0,0 +1,463 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + + deploymentutil "helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util" +) + +// ReadyCheckerOption is a function that configures a ReadyChecker. +type ReadyCheckerOption func(*ReadyChecker) + +// PausedAsReady returns a ReadyCheckerOption that configures a ReadyChecker +// to consider paused resources to be ready. For example a Deployment +// with spec.paused equal to true would be considered ready. +func PausedAsReady(pausedAsReady bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.pausedAsReady = pausedAsReady + } +} + +// CheckJobs returns a ReadyCheckerOption that configures a ReadyChecker +// to consider readiness of Job resources. +func CheckJobs(checkJobs bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.checkJobs = checkJobs + } +} + +// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can +// be used to override defaults. 
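
Aside: PausedAsReady and CheckJobs above are functional options consumed by
NewReadyChecker just below. A minimal usage sketch, assuming clientset is a
configured kubernetes.Interface and info is a *resource.Info (both names are
hypothetical):

	checker := kube.NewReadyChecker(clientset, log.Printf,
		kube.PausedAsReady(true), // treat paused Deployments as ready
		kube.CheckJobs(true),     // gate readiness on Job completion too
	)
	ready, err := checker.IsReady(context.Background(), info)
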
+func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker { + c := ReadyChecker{ + client: cl, + log: log, + } + if c.log == nil { + c.log = nopLogger + } + for _, opt := range opts { + opt(&c) + } + return c +} + +// ReadyChecker is a type that can check core Kubernetes types for readiness. +type ReadyChecker struct { + client kubernetes.Interface + log func(string, ...interface{}) + checkJobs bool + pausedAsReady bool +} + +// IsReady checks if v is ready. It supports checking readiness for pods, +// deployments, persistent volume claims, services, daemon sets, custom +// resource definitions, stateful sets, replication controllers, jobs (optional), +// and replica sets. All other resource kinds are always considered ready. +// +// IsReady will fetch the latest state of the object from the server prior to +// performing readiness checks, and it will return any error encountered. +func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) { + switch value := AsVersioned(v).(type) { + case *corev1.Pod: + pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil || !c.isPodReady(pod) { + return false, err + } + case *batchv1.Job: + if c.checkJobs { + job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + ready, err := c.jobReady(job) + return ready, err + } + case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment: + currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + return c.pausedAsReady, nil + } + // Find RS associated with deployment + newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, c.client.AppsV1()) + if err != nil || newReplicaSet == nil { + return false, err + } + if !c.deploymentReady(newReplicaSet, currentDeployment) { + return false, nil + } + case *corev1.PersistentVolumeClaim: + claim, err := c.client.CoreV1().PersistentVolumeClaims(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.volumeReady(claim) { + return false, nil + } + case *corev1.Service: + svc, err := c.client.CoreV1().Services(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.serviceReady(svc) { + return false, nil + } + case *extensionsv1beta1.DaemonSet, *appsv1.DaemonSet, *appsv1beta2.DaemonSet: + ds, err := c.client.AppsV1().DaemonSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.daemonSetReady(ds) { + return false, nil + } + case *apiextv1beta1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1beta1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdBetaReady(*crd) { + return false, nil + } + case *apiextv1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdReady(*crd) { + return false, nil + } + case *appsv1.StatefulSet, *appsv1beta1.StatefulSet, *appsv1beta2.StatefulSet: + sts, err := 
c.client.AppsV1().StatefulSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.statefulSetReady(sts) { + return false, nil + } + case *corev1.ReplicationController: + rc, err := c.client.CoreV1().ReplicationControllers(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicationControllerReady(rc) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } + case *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: + rs, err := c.client.AppsV1().ReplicaSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicaSetReady(rs) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } + } + return true, nil +} + +func (c *ReadyChecker) podsReadyForObject(ctx context.Context, namespace string, obj runtime.Object) (bool, error) { + pods, err := c.podsforObject(ctx, namespace, obj) + if err != nil { + return false, err + } + for _, pod := range pods { + if !c.isPodReady(&pod) { + return false, nil + } + } + return true, nil +} + +func (c *ReadyChecker) podsforObject(ctx context.Context, namespace string, obj runtime.Object) ([]corev1.Pod, error) { + selector, err := SelectorsForObject(obj) + if err != nil { + return nil, err + } + list, err := getPods(ctx, c.client, namespace, selector.String()) + return list, err +} + +// isPodReady returns true if a pod is ready; false otherwise. +func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool { + for _, c := range pod.Status.Conditions { + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return true + } + } + c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) + return false +} + +func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { + if job.Status.Failed > *job.Spec.BackoffLimit { + c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) + // If a job is failed, it can't recover, so throw an error + return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) + } + if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { + c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) + return false, nil + } + return true, nil +} + +func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { + // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. 
have an IP Set) + if s.Spec.Type == corev1.ServiceTypeExternalName { + return true + } + + // Ensure that the service cluster IP is not empty + if s.Spec.ClusterIP == "" { + c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + + // This checks if the service has a LoadBalancer and that balancer has an Ingress defined + if s.Spec.Type == corev1.ServiceTypeLoadBalancer { + // do not wait when at least 1 external IP is set + if len(s.Spec.ExternalIPs) > 0 { + c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) + return true + } + + if s.Status.LoadBalancer.Ingress == nil { + c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + } + + return true +} + +func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { + if v.Status.Phase != corev1.ClaimBound { + c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) + return false + } + return true +} + +func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool { + // Verify the replicaset readiness + if !c.replicaSetReady(rs) { + return false + } + // Verify the generation observed by the deployment controller matches the spec generation + if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { + c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation) + return false + } + + expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) + if !(rs.Status.ReadyReplicas >= expectedReady) { + c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) + return false + } + return true +} + +func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { + // Verify the generation observed by the daemonSet controller matches the spec generation + if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { + c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation) + return false + } + + // If the update strategy is not a rolling update, there will be nothing to wait for + if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return true + } + + // Make sure all the updated pods have been scheduled + if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { + c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) + return false + } + maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) + if err != nil { + // If for some reason the value is invalid, set max unavailable to the + // number of desired replicas. This is the same behavior as the + // `MaxUnavailable` function in deploymentutil + maxUnavailable = int(ds.Status.DesiredNumberScheduled) + } + + expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable + if !(int(ds.Status.NumberReady) >= expectedReady) { + c.log("DaemonSet is not ready: %s/%s. 
%d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) + return false + } + return true +} + +// Because the v1 extensions API is not available on all supported k8s versions +// yet and because Go doesn't support generics, we need to have a duplicate +// function to support the v1beta1 types +func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1beta1.Established: + if cond.Status == apiextv1beta1.ConditionTrue { + return true + } + case apiextv1beta1.NamesAccepted: + if cond.Status == apiextv1beta1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1.Established: + if cond.Status == apiextv1.ConditionTrue { + return true + } + case apiextv1.NamesAccepted: + if cond.Status == apiextv1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { + // Verify the generation observed by the statefulSet controller matches the spec generation + if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { + c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation) + return false + } + + // If the update strategy is not a rolling update, there will be nothing to wait for + if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { + c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type) + return true + } + + // Dereference all the pointers because StatefulSets like them + var partition int + // 1 is the default for replicas if not set + replicas := 1 + // For some reason, even if the update strategy is a rolling update, the + // actual rollingUpdate field can be nil. If it is, we can safely assume + // there is no partition value + if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { + partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition) + } + if sts.Spec.Replicas != nil { + replicas = int(*sts.Spec.Replicas) + } + + // Because an update strategy can use partitioning, we need to calculate the + // number of updated replicas we should have. For example, if the replicas + // is set to 3 and the partition is 2, we'd expect only one pod to be + // updated + expectedReplicas := replicas - partition + + // Make sure all the updated pods have been scheduled + if int(sts.Status.UpdatedReplicas) < expectedReplicas { + c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas) + return false + } + + if int(sts.Status.ReadyReplicas) != replicas { + c.log("StatefulSet is not ready: %s/%s. 
%d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) + return false + } + // This check only makes sense when all partitions are being upgraded otherwise during a + // partitioned rolling upgrade, this condition will never evaluate to true, leading to + // error. + if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { + c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision) + return false + } + + c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) + return true +} + +func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { + // Verify the generation observed by the replicationController controller matches the spec generation + if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { + c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation) + return false + } + return true +} + +func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { + // Verify the generation observed by the replicaSet controller matches the spec generation + if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { + c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation) + return false + } + return true +} + +func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) { + list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + return list.Items, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/vendor/helm.sh/helm/v3/pkg/kube/resource.go new file mode 100644 index 000000000000..db8e9178e719 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/resource.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import "k8s.io/cli-runtime/pkg/resource" + +// ResourceList provides convenience methods for comparing collections of Infos. +type ResourceList []*resource.Info + +// Append adds an Info to the Result. +func (r *ResourceList) Append(val *resource.Info) { + *r = append(*r, val) +} + +// Visit implements resource.Visitor. The visitor stops if fn returns an error. +func (r ResourceList) Visit(fn resource.VisitorFunc) error { + for _, i := range r { + if err := fn(i, nil); err != nil { + return err + } + } + return nil +} + +// Filter returns a new Result with Infos that satisfy the predicate fn. 
+func (r ResourceList) Filter(fn func(*resource.Info) bool) ResourceList { + var result ResourceList + for _, i := range r { + if fn(i) { + result.Append(i) + } + } + return result +} + +// Get returns the Info from the result that matches the name and kind. +func (r ResourceList) Get(info *resource.Info) *resource.Info { + for _, i := range r { + if isMatchingInfo(i, info) { + return i + } + } + return nil +} + +// Contains checks to see if an object exists. +func (r ResourceList) Contains(info *resource.Info) bool { + for _, i := range r { + if isMatchingInfo(i, info) { + return true + } + } + return false +} + +// Difference will return a new Result with objects not contained in rs. +func (r ResourceList) Difference(rs ResourceList) ResourceList { + return r.Filter(func(info *resource.Info) bool { + return !rs.Contains(info) + }) +} + +// Intersect will return a new Result with objects contained in both Results. +func (r ResourceList) Intersect(rs ResourceList) ResourceList { + return r.Filter(rs.Contains) +} + +// isMatchingInfo returns true if infos match on Name and GroupVersionKind. +func isMatchingInfo(a, b *resource.Info) bool { + return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind && a.Mapping.GroupVersionKind.Group == b.Mapping.GroupVersionKind.Group +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go new file mode 100644 index 000000000000..46b8680dd2cb --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go @@ -0,0 +1,27 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +// ResourcePolicyAnno is the annotation name for a resource policy +const ResourcePolicyAnno = "helm.sh/resource-policy" + +// KeepPolicy is the resource policy type for keep +// +// This resource policy type allows resources to skip being deleted +// +// during an uninstallRelease action. +const KeepPolicy = "keep" diff --git a/vendor/helm.sh/helm/v3/pkg/kube/result.go b/vendor/helm.sh/helm/v3/pkg/kube/result.go new file mode 100644 index 000000000000..c3e171c2e4d1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/result.go @@ -0,0 +1,28 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
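
Aside: the Filter, Contains, Difference, and Intersect helpers above give
ResourceList small set-style semantics. A hypothetical sketch, assuming
current and target are ResourceList values for the deployed and desired
states:

	toCreate := target.Difference(current)  // desired but not yet deployed
	toDelete := current.Difference(target)  // deployed but no longer desired
	unchanged := current.Intersect(target)  // present in both lists
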
+*/ + +package kube + +// Result contains the information of created, updated, and deleted resources +// for various kube API calls along with helper methods for using those +// resources +type Result struct { + Created ResourceList + Updated ResourceList + Deleted ResourceList +} + +// If needed, we can add methods to the Result type for things like diffing diff --git a/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go b/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go new file mode 100644 index 000000000000..fdb1035291de --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go @@ -0,0 +1,80 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "strings" +) + +type RetryingRoundTripper struct { + Wrapped http.RoundTripper +} + +func (rt *RetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.roundTrip(req, 1, nil) +} + +func (rt *RetryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { + if retry < 0 { + return prevResp, nil + } + resp, rtErr := rt.Wrapped.RoundTrip(req) + if rtErr != nil { + return resp, rtErr + } + if resp.StatusCode < 500 { + return resp, rtErr + } + if resp.Header.Get("content-type") != "application/json" { + return resp, rtErr + } + b, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return resp, rtErr + } + + var ke kubernetesError + r := bytes.NewReader(b) + err = json.NewDecoder(r).Decode(&ke) + r.Seek(0, io.SeekStart) + resp.Body = io.NopCloser(r) + if err != nil { + return resp, rtErr + } + if ke.Code < 500 { + return resp, rtErr + } + // Matches messages like "etcdserver: leader changed" + if strings.HasSuffix(ke.Message, "etcdserver: leader changed") { + return rt.roundTrip(req, retry-1, resp) + } + // Matches messages like "rpc error: code = Unknown desc = raft proposal dropped" + if strings.HasSuffix(ke.Message, "raft proposal dropped") { + return rt.roundTrip(req, retry-1, resp) + } + return resp, rtErr +} + +type kubernetesError struct { + Message string `json:"message"` + Code int `json:"code"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/wait.go b/vendor/helm.sh/helm/v3/pkg/kube/wait.go new file mode 100644 index 000000000000..c602004ad4f3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/wait.go @@ -0,0 +1,176 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
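
Aside: RetryingRoundTripper above retries a request when the apiserver reports
a transient etcd error, and it can wrap any http.RoundTripper. A hypothetical
wiring through client-go, assuming cfg is a *rest.Config:

	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return &kube.RetryingRoundTripper{Wrapped: rt}
	})
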
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" + + "k8s.io/apimachinery/pkg/util/wait" +) + +type waiter struct { + c ReadyChecker + timeout time.Duration + log func(string, ...interface{}) +} + +// waitForResources polls to get the current status of all pods, PVCs, Services and +// Jobs(optional) until all are ready or a timeout is reached +func (w *waiter) waitForResources(created ResourceList) error { + w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) + + startTime := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + + numberOfErrors := make([]int, len(created)) + for i := range numberOfErrors { + numberOfErrors[i] = 0 + } + + err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { + waitRetries := 30 + for i, v := range created { + ready, err := w.c.IsReady(ctx, v) + + if waitRetries > 0 && w.isRetryableError(err, v) { + numberOfErrors[i]++ + if numberOfErrors[i] > waitRetries { + w.log("Max number of retries reached") + return false, err + } + w.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries) + return false, nil + } + numberOfErrors[i] = 0 + if !ready { + return false, err + } + } + return true, nil + }) + + elapsed := time.Since(startTime).Round(time.Second) + if err != nil { + w.log("wait for resources failed after %v: %v", elapsed, err) + } else { + w.log("wait for resources succeeded within %v", elapsed) + } + + return err +} + +func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { + if err == nil { + return false + } + w.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource) + if ev, ok := err.(*apierrors.StatusError); ok { + statusCode := ev.Status().Code + retryable := w.isRetryableHTTPStatusCode(statusCode) + w.log("Status code received: %d. Retryable error? %t", statusCode, retryable) + return retryable + } + w.log("Retryable error? 
%t", true) + return true +} + +func (w *waiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool { + return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented) +} + +// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached +func (w *waiter) waitForDeletedResources(deleted ResourceList) error { + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) + + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + + return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) { + for _, v := range deleted { + err := v.Get() + if err == nil || !apierrors.IsNotFound(err) { + return false, err + } + } + return true, nil + }) +} + +// SelectorsForObject returns the pod label selector for a given object +// +// Modified version of https://github.com/kubernetes/kubernetes/blob/v1.14.1/pkg/kubectl/polymorphichelpers/helpers.go#L84 +func SelectorsForObject(object runtime.Object) (selector labels.Selector, err error) { + switch t := object.(type) { + case *extensionsv1beta1.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *corev1.ReplicationController: + selector = labels.SelectorFromSet(t.Spec.Selector) + case *appsv1.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta1.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *extensionsv1beta1.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *extensionsv1beta1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *batchv1.Job: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *corev1.Service: + if len(t.Spec.Selector) == 0 { + return nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name) + } + selector = labels.SelectorFromSet(t.Spec.Selector) + + default: + return nil, fmt.Errorf("selector for %T not implemented", object) + } + + return selector, errors.Wrap(err, "invalid label selector") +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/vendor/helm.sh/helm/v3/pkg/lint/lint.go new file mode 100644 index 000000000000..ef23ee7c89e5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/lint.go @@ -0,0 +1,48 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lint // import "helm.sh/helm/v3/pkg/lint" + +import ( + "path/filepath" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/rules" + "helm.sh/helm/v3/pkg/lint/support" +) + +// All runs all the available linters on the given base directory. +func All(basedir string, values map[string]interface{}, namespace string, _ bool) support.Linter { + return AllWithKubeVersion(basedir, values, namespace, nil) +} + +// AllWithKubeVersion runs all the available linters on the given base directory, allowing to specify the kubernetes version. +func AllWithKubeVersion(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) support.Linter { + return AllWithKubeVersionAndSchemaValidation(basedir, values, namespace, kubeVersion, false) +} + +// AllWithKubeVersionAndSchemaValidation runs all the available linters on the given base directory, allowing to specify the kubernetes version and if schema validation is enabled or not. +func AllWithKubeVersionAndSchemaValidation(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) support.Linter { + // Using abs path to get directory context + chartDir, _ := filepath.Abs(basedir) + + linter := support.Linter{ChartDir: chartDir} + rules.Chartfile(&linter) + rules.ValuesWithOverrides(&linter, values) + rules.TemplatesWithSkipSchemaValidation(&linter, values, namespace, kubeVersion, skipSchemaValidation) + rules.Dependencies(&linter) + return linter +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go new file mode 100644 index 000000000000..910602b7df54 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go @@ -0,0 +1,213 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
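
Aside: All, AllWithKubeVersion, and AllWithKubeVersionAndSchemaValidation above
each return a support.Linter whose Messages field accumulates the findings. A
minimal, hypothetical driver (the chart path and empty values map are
placeholders):

	linter := lint.All("./mychart", map[string]interface{}{}, "default", false)
	for _, msg := range linter.Messages {
		fmt.Println(msg) // severity, file path, and error text
	}
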
+*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Masterminds/semver/v3" + "github.com/asaskevich/govalidator" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Chartfile runs a set of linter rules related to Chart.yaml file +func Chartfile(linter *support.Linter) { + chartFileName := "Chart.yaml" + chartPath := filepath.Join(linter.ChartDir, chartFileName) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath)) + + chartFile, err := chartutil.LoadChartfile(chartPath) + validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err)) + + // Guard clause. Following linter rules require a parsable ChartFile + if !validChartFile { + return + } + + // type check for Chart.yaml . ignoring error as any parse + // errors would already be caught in the above load function + chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile)) + + // Chart metadata + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile)) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile)) + linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) +} + +func validateChartVersionType(data map[string]interface{}) error { + return isStringValue(data, "version") +} + +func validateChartAppVersionType(data map[string]interface{}) error { + return isStringValue(data, "appVersion") +} + +func isStringValue(data map[string]interface{}, key string) error { + value, ok := data[key] + if !ok { + return nil + } + valueType := fmt.Sprintf("%T", value) + if valueType != "string" { + return errors.Errorf("%s should be of type string but it's of type %s", key, valueType) + } + return nil +} + +func validateChartYamlNotDirectory(chartPath string) error { + fi, err := os.Stat(chartPath) + + if err == nil && fi.IsDir() { + return errors.New("should be a file, not a directory") + } + return nil +} + +func validateChartYamlFormat(chartFileError error) error { + if chartFileError != nil { + return errors.Errorf("unable to parse YAML\n\t%s", chartFileError.Error()) + } + return nil +} + +func validateChartName(cf *chart.Metadata) error { + if cf.Name == "" { + return errors.New("name is required") + } + name := filepath.Base(cf.Name) + if name != cf.Name { + return fmt.Errorf("chart name %q is invalid", cf.Name) + } + return nil +} + +func validateChartAPIVersion(cf *chart.Metadata) error { + if cf.APIVersion == "" { + return errors.New("apiVersion is required. 
The value must be either \"v1\" or \"v2\"") + } + + if cf.APIVersion != chart.APIVersionV1 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("apiVersion '%s' is not valid. The value must be either \"v1\" or \"v2\"", cf.APIVersion) + } + + return nil +} + +func validateChartVersion(cf *chart.Metadata) error { + if cf.Version == "" { + return errors.New("version is required") + } + + version, err := semver.NewVersion(cf.Version) + + if err != nil { + return errors.Errorf("version '%s' is not a valid SemVer", cf.Version) + } + + c, err := semver.NewConstraint(">0.0.0-0") + if err != nil { + return err + } + valid, msg := c.Validate(version) + + if !valid && len(msg) > 0 { + return errors.Errorf("version %v", msg[0]) + } + + return nil +} + +func validateChartMaintainer(cf *chart.Metadata) error { + for _, maintainer := range cf.Maintainers { + if maintainer.Name == "" { + return errors.New("each maintainer requires a name") + } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { + return errors.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name) + } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) { + return errors.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name) + } + } + return nil +} + +func validateChartSources(cf *chart.Metadata) error { + for _, source := range cf.Sources { + if source == "" || !govalidator.IsRequestURL(source) { + return errors.Errorf("invalid source URL '%s'", source) + } + } + return nil +} + +func validateChartIconPresence(cf *chart.Metadata) error { + if cf.Icon == "" { + return errors.New("icon is recommended") + } + return nil +} + +func validateChartIconURL(cf *chart.Metadata) error { + if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) { + return errors.Errorf("invalid icon URL '%s'", cf.Icon) + } + return nil +} + +func validateChartDependencies(cf *chart.Metadata) error { + if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +func validateChartType(cf *chart.Metadata) error { + if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +// loadChartFileForTypeCheck loads the Chart.yaml +// in a generic form of a map[string]interface{}, so that the type +// of the values can be checked +func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + y := make(map[string]interface{}) + err = yaml.Unmarshal(b, &y) + return y, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go new file mode 100644 index 000000000000..f1ab1dcadc32 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go @@ -0,0 +1,103 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Dependencies runs lints against a chart's dependencies +// +// See https://github.com/helm/helm/issues/7910 +func Dependencies(linter *support.Linter) { + c, err := loader.LoadDir(linter.ChartDir) + if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) { + return + } + + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c)) + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c)) + linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c)) +} + +func validateChartFormat(chartError error) error { + if chartError != nil { + return errors.Errorf("unable to load chart\n\t%s", chartError) + } + return nil +} + +func validateDependencyInChartsDir(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Dependencies() { + dependencies[dep.Metadata.Name] = struct{}{} + } + for _, dep := range c.Metadata.Dependencies { + if _, ok := dependencies[dep.Name]; !ok { + missing = append(missing, dep.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependencyInMetadata(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Metadata.Dependencies { + dependencies[dep.Name] = struct{}{} + } + for _, dep := range c.Dependencies() { + if _, ok := dependencies[dep.Metadata.Name]; !ok { + missing = append(missing, dep.Metadata.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependenciesUnique(c *chart.Chart) (err error) { + dependencies := map[string]*chart.Dependency{} + shadowing := []string{} + + for _, dep := range c.Metadata.Dependencies { + key := dep.Name + if dep.Alias != "" { + key = dep.Alias + } + if dependencies[key] != nil { + shadowing = append(shadowing, key) + } + dependencies[key] = dep + } + if len(shadowing) > 0 { + err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ",")) + } + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go new file mode 100644 index 000000000000..90e7748a42f2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go @@ -0,0 +1,106 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/deprecation" + kscheme "k8s.io/client-go/kubernetes/scheme" + + "helm.sh/helm/v3/pkg/chartutil" +) + +var ( + // This should be set in the Makefile based on the version of client-go being imported. + // These constants will be overwritten with LDFLAGS. The version components must be + // strings in order for LDFLAGS to set them. + k8sVersionMajor = "1" + k8sVersionMinor = "20" +) + +// deprecatedAPIError indicates than an API is deprecated in Kubernetes +type deprecatedAPIError struct { + Deprecated string + Message string +} + +func (e deprecatedAPIError) Error() string { + msg := e.Message + return msg +} + +func validateNoDeprecations(resource *K8sYamlStruct, kubeVersion *chartutil.KubeVersion) error { + // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation + if resource.APIVersion == "" { + return nil + } + if resource.Kind == "" { + return nil + } + + majorVersion := k8sVersionMajor + minorVersion := k8sVersionMinor + + if kubeVersion != nil { + majorVersion = kubeVersion.Major + minorVersion = kubeVersion.Minor + } + + runtimeObject, err := resourceToRuntimeObject(resource) + if err != nil { + // do not error for non-kubernetes resources + if runtime.IsNotRegisteredError(err) { + return nil + } + return err + } + + maj, err := strconv.Atoi(majorVersion) + if err != nil { + return err + } + min, err := strconv.Atoi(minorVersion) + if err != nil { + return err + } + + if !deprecation.IsDeprecated(runtimeObject, maj, min) { + return nil + } + gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind) + return deprecatedAPIError{ + Deprecated: gvk, + Message: deprecation.WarningMessage(runtimeObject), + } +} + +func resourceToRuntimeObject(resource *K8sYamlStruct) (runtime.Object, error) { + scheme := runtime.NewScheme() + kscheme.AddToScheme(scheme) + + gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind) + out, err := scheme.New(gvk) + if err != nil { + return nil, err + } + out.GetObjectKind().SetGroupVersionKind(gvk) + return out, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go new file mode 100644 index 000000000000..41d1a1bab275 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go @@ -0,0 +1,356 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package rules
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/api/validation"
+	apipath "k8s.io/apimachinery/pkg/api/validation/path"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apimachinery/pkg/util/yaml"
+
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/engine"
+	"helm.sh/helm/v3/pkg/lint/support"
+)
+
+var (
+	crdHookSearch     = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`)
+	releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`)
+)
+
+// Templates lints the templates in the Linter.
+func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) {
+	TemplatesWithKubeVersion(linter, values, namespace, nil)
+}
+
+// TemplatesWithKubeVersion lints the templates in the Linter, allowing the
+// Kubernetes version to be specified.
+func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) {
+	TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false)
+}
+
+// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing the
+// Kubernetes version to be specified and schema validation to be enabled or disabled.
+func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion, skipSchemaValidation bool) {
+	fpath := "templates/"
+	templatesPath := filepath.Join(linter.ChartDir, fpath)
+
+	templatesDirExist := linter.RunLinterRule(support.WarningSev, fpath, validateTemplatesDir(templatesPath))
+
+	// Templates directory is optional for now
+	if !templatesDirExist {
+		return
+	}
+
+	// Load chart and parse templates
+	chart, err := loader.Load(linter.ChartDir)
+
+	chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+	if !chartLoaded {
+		return
+	}
+
+	options := chartutil.ReleaseOptions{
+		Name:      "test-release",
+		Namespace: namespace,
+	}
+
+	caps := chartutil.DefaultCapabilities.Copy()
+	if kubeVersion != nil {
+		caps.KubeVersion = *kubeVersion
+	}
+
+	// lint ignores import-values
+	// See https://github.com/helm/helm/issues/9658
+	if err := chartutil.ProcessDependenciesWithMerge(chart, values); err != nil {
+		return
+	}
+
+	cvals, err := chartutil.CoalesceValues(chart, values)
+	if err != nil {
+		return
+	}
+
+	valuesToRender, err := chartutil.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
+	if err != nil {
+		linter.RunLinterRule(support.ErrorSev, fpath, err)
+		return
+	}
+	var e engine.Engine
+	e.LintMode = true
+	renderedContentMap, err := e.Render(chart, valuesToRender)
+
+	renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+	if !renderOk {
+		return
+	}
+
+	/* Iterate over all the templates to check:
+	- It is a .yaml file
+	- All the values in the template file are defined
+	- {{}} include | quote
+	- Generated content is a valid YAML file
+	- Metadata.Namespace is not set
+	*/
+	for _, template := range chart.Templates {
+		fileName, data := template.Name, template.Data
+		fpath = fileName
+
+		linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
+		// These are v3 specific checks to make sure and warn people if their
+		// chart is not compatible with v3
+		linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data))
+		linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data))
+
+		// We only apply the following lint rules to .yaml files; .yml files are skipped
+		if filepath.Ext(fileName) != ".yaml" {
+			continue
+		}
+
+		// NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+		// Check that all the templates have a matching value
+		// linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+		// NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+		// linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+		renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+		if strings.TrimSpace(renderedContent) != "" {
+			linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent))
+
+			decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+			// Lint all resources if the file contains multiple documents separated by ---
+			for {
+				// Even though K8sYamlStruct only defines a few fields, an error in any other
+				// key will be raised as well
+				var yamlStruct *K8sYamlStruct
+
+				err := decoder.Decode(&yamlStruct)
+				if err == io.EOF {
+					break
+				}
+
+				// If YAML linting fails here, it will always fail in the next block as well, so we should return here.
+				// fix https://github.com/helm/helm/issues/11391
+				if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+					return
+				}
+				if yamlStruct != nil {
+					// NOTE: set to warnings to allow users to support out-of-date kubernetes
+					// Refs https://github.com/helm/helm/issues/8596
+					linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct))
+					linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion))
+
+					linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent))
+					linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent))
+				}
+			}
+		}
+	}
+}
+
+// validateTopIndentLevel checks that the content does not start with an indent level > 0.
+//
+// This error can occur when a template accidentally inserts space. It can cause
+// unpredictable errors depending on whether the text is normalized before being passed
+// into the YAML parser. So we trap it here.
+//
+// See https://github.com/helm/helm/issues/8467
+func validateTopIndentLevel(content string) error {
+	// Read lines until we get to a non-empty one
+	scanner := bufio.NewScanner(bytes.NewBufferString(content))
+	for scanner.Scan() {
+		line := scanner.Text()
+		// If line is empty, skip
+		if strings.TrimSpace(line) == "" {
+			continue
+		}
+		// If it starts with one or more spaces, this is an error
+		if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+			return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line)
+		}
+		// Any other condition passes.
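+		// For example, a rendered document beginning with "  apiVersion: v1"
+		// (note the two leading spaces) is rejected by the check above, while
+		// the same document without the indent passes.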
+		return nil
+	}
+	return scanner.Err()
+}
+
+// Validation functions
+func validateTemplatesDir(templatesPath string) error {
+	if fi, err := os.Stat(templatesPath); err == nil {
+		if !fi.IsDir() {
+			return errors.New("not a directory")
+		}
+	}
+	return nil
+}
+
+func validateAllowedExtension(fileName string) error {
+	ext := filepath.Ext(fileName)
+	validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"}
+
+	for _, b := range validExtensions {
+		if b == ext {
+			return nil
+		}
+	}
+
+	return errors.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
+}
+
+func validateYamlContent(err error) error {
+	return errors.Wrap(err, "unable to parse YAML")
+}
+
+// validateMetadataName uses the correct validation function for the object
+// Kind, or if not set, defaults to the standard definition of a subdomain in
+// DNS (RFC 1123), used by most resources.
+func validateMetadataName(obj *K8sYamlStruct) error {
+	fn := validateMetadataNameFunc(obj)
+	allErrs := field.ErrorList{}
+	for _, msg := range fn(obj.Metadata.Name, false) {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
+	}
+	if len(allErrs) > 0 {
+		return errors.Wrapf(allErrs.ToAggregate(), "object name does not conform to Kubernetes naming requirements: %q", obj.Metadata.Name)
+	}
+	return nil
+}
+
+// validateMetadataNameFunc will return a name validation function for the
+// object kind, if defined below.
+//
+// Rules should match those set in the various api validations:
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39
+// ...
+//
+// Implementing here to avoid importing k/k.
+//
+// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
+// kinds that don't have special requirements, so is the most likely to work if
+// new kinds are added.
+func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
+	switch strings.ToLower(obj.Kind) {
+	case "pod", "node", "secret", "endpoints", "resourcequota", // core
+		"controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
+		"autoscaler", // autoscaler
+		"cronjob", "job", // batch
+		"lease", // coordination
+		"endpointslice", // discovery
+		"networkpolicy", "ingress", // networking
+		"podsecuritypolicy", // policy
+		"priorityclass", // scheduling
+		"podpreset", // settings
+		"storageclass", "volumeattachment", "csinode": // storage
+		return validation.NameIsDNSSubdomain
+	case "service":
+		return validation.NameIsDNS1035Label
+	case "namespace":
+		return validation.ValidateNamespaceName
+	case "serviceaccount":
+		return validation.ValidateServiceAccountName
+	case "certificatesigningrequest":
+		// No validation.
+		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
+		return func(_ string, _ bool) []string { return nil }
+	case "role", "clusterrole", "rolebinding", "clusterrolebinding":
+		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
+		return func(name string, _ bool) []string {
+			return apipath.IsValidPathSegmentName(name)
+		}
+	default:
+		return validation.NameIsDNSSubdomain
+	}
+}
+
+func validateNoCRDHooks(manifest []byte) error {
+	if crdHookSearch.Match(manifest) {
+		return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist in the crds/ directory at the top level of the chart")
+	}
+	return nil
+}
+
+func validateNoReleaseTime(manifest []byte) error {
+	if releaseTimeSearch.Match(manifest) {
+		return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
+	}
+	return nil
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error {
+	switch yamlStruct.Kind {
+	case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+		// verify that matchLabels or matchExpressions is present
+		if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) {
+			return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+		}
+	}
+	return nil
+}
+
+func validateListAnnotations(yamlStruct *K8sYamlStruct, manifest string) error {
+	if yamlStruct.Kind == "List" {
+		m := struct {
+			Items []struct {
+				Metadata struct {
+					Annotations map[string]string
+				}
+			}
+		}{}
+
+		if err := yaml.Unmarshal([]byte(manifest), &m); err != nil {
+			return validateYamlContent(err)
+		}
+
+		for _, i := range m.Items {
+			if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
+				return errors.New("annotation 'helm.sh/resource-policy' within List objects is ignored")
+			}
+		}
+	}
+	return nil
+}
+
+// K8sYamlStruct stubs a Kubernetes YAML file.
+//
+// DEPRECATED: In Helm 4, this will be made a private type, as it is for use only within
+// the rules package.
+type K8sYamlStruct struct {
+	APIVersion string `json:"apiVersion"`
+	Kind       string
+	Metadata   k8sYamlMetadata
+}
+
+type k8sYamlMetadata struct {
+	Namespace string
+	Name      string
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
new file mode 100644
index 000000000000..538d8381b68e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
@@ -0,0 +1,86 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/lint/support"
+)
+
+// Values lints a chart's values.yaml file.
+//
+// This function is deprecated and will be removed in Helm 4.
+func Values(linter *support.Linter) {
+	ValuesWithOverrides(linter, map[string]interface{}{})
+}
+
+// ValuesWithOverrides tests the values.yaml file.
+//
+// If a schema is present in the chart, values are tested against that. Otherwise,
+// they are only tested for well-formedness.
+//
+// If additional values are supplied, they are coalesced into the values in values.yaml.
+func ValuesWithOverrides(linter *support.Linter, values map[string]interface{}) {
+	file := "values.yaml"
+	vf := filepath.Join(linter.ChartDir, file)
+	fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
+
+	if !fileExists {
+		return
+	}
+
+	linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, values))
+}
+
+func validateValuesFileExistence(valuesPath string) error {
+	_, err := os.Stat(valuesPath)
+	if err != nil {
+		return errors.New("file does not exist")
+	}
+	return nil
+}
+
+func validateValuesFile(valuesPath string, overrides map[string]interface{}) error {
+	values, err := chartutil.ReadValuesFile(valuesPath)
+	if err != nil {
+		return errors.Wrap(err, "unable to parse YAML")
+	}
+
+	// Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
+	// level values against the top-level expectations. Subchart values are not linted.
+	// We could change that. For now, though, we retain that strategy, and thus can
+	// coalesce tables (like reuse-values does) instead of doing the full chart
+	// CoalesceValues
+	coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+	coalescedValues = chartutil.CoalesceTables(coalescedValues, values)
+
+	ext := filepath.Ext(valuesPath)
+	schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
+	schema, err := os.ReadFile(schemaPath)
+	// A missing or empty schema file means there is nothing to validate against;
+	// os.ReadFile returns no data in that case, so check the length first.
+	if len(schema) == 0 {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go
new file mode 100644
index 000000000000..bffefe8ffd35
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package support contains tools for linting charts.
+
+Linting is the process of testing charts for errors or warnings regarding
+formatting, compilation, or standards compliance.
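+
+A minimal usage sketch (the chart directory is illustrative, and
+validateSomething stands in for any rule function returning an error):
+
+	linter := support.Linter{ChartDir: "./mychart"}
+	linter.RunLinterRule(support.ErrorSev, "templates/", validateSomething())
+	for _, msg := range linter.Messages {
+		fmt.Println(msg.Error())
+	}
+
+HighestSeverity then reports the most severe issue that was recorded.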
+*/
+package support // import "helm.sh/helm/v3/pkg/lint/support"
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/message.go b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go
new file mode 100644
index 000000000000..5efbc7a61ca0
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import "fmt"
+
+// Severity indicates the severity of a Message.
+const (
+	// UnknownSev indicates that the severity of the error is unknown, and should not stop processing.
+	UnknownSev = iota
+	// InfoSev indicates information, for example, a missing values.yaml file.
+	InfoSev
+	// WarningSev indicates that something does not meet code standards, but will likely function.
+	WarningSev
+	// ErrorSev indicates that something will not likely function.
+	ErrorSev
+)
+
+// sev matches the *Sev states.
+var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"}
+
+// Linter encapsulates a linting run of a particular chart.
+type Linter struct {
+	Messages []Message
+	// The highest severity of all the failing lint rules
+	HighestSeverity int
+	ChartDir        string
+}
+
+// Message describes an error encountered while linting.
+type Message struct {
+	// Severity is one of the *Sev constants
+	Severity int
+	Path     string
+	Err      error
+}
+
+func (m Message) Error() string {
+	return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+	return Message{Severity: severity, Path: path, Err: err}
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+	// severity is out of bounds
+	if severity < 0 || severity >= len(sev) {
+		return false
+	}
+
+	if err != nil {
+		l.Messages = append(l.Messages, NewMessage(severity, path, err))
+
+		if severity > l.HighestSeverity {
+			l.HighestSeverity = severity
+		}
+	}
+	return err == nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go
new file mode 100644
index 000000000000..34d3163a4064
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go
@@ -0,0 +1,32 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v3/pkg/plugin"
+
+// Types of hooks
+const (
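+	// For example (an illustrative sketch, not a schema), a plugin.yaml can
+	// wire these events to commands through the Hooks map:
+	//
+	//	hooks:
+	//	  install: "scripts/setup.sh"
+	//	  update: "scripts/setup.sh"
+
+	// Install is executed after the plugin is added.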
+	Install = "install"
+	// Delete is executed after the plugin is removed.
+	Delete = "delete"
+	// Update is executed after the plugin is updated.
+	Update = "update"
+)
+
+// PlatformHooks is a map of events to a command for a particular operating system and architecture.
+type PlatformHooks map[string][]PlatformCommand
+
+// Hooks is a map of events to commands.
+type Hooks map[string]string
diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
new file mode 100644
index 000000000000..8eac1f74d54c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
@@ -0,0 +1,377 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v3/pkg/plugin"
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/yaml"
+
+	"helm.sh/helm/v3/pkg/cli"
+)
+
+const PluginFileName = "plugin.yaml"
+
+// Downloaders describes a plugin's ability to retrieve charts from special
+// sources.
+type Downloaders struct {
+	// Protocols are the list of schemes from the charts URL.
+	Protocols []string `json:"protocols"`
+	// Command is the executable path with which the plugin performs
+	// the actual download for the corresponding Protocols
+	Command string `json:"command"`
+}
+
+// PlatformCommand represents a command for a particular operating system and architecture
+type PlatformCommand struct {
+	OperatingSystem string   `json:"os"`
+	Architecture    string   `json:"arch"`
+	Command         string   `json:"command"`
+	Args            []string `json:"args"`
+}
+
+// Metadata describes a plugin.
+//
+// This is the plugin equivalent of a chart.Metadata.
+type Metadata struct {
+	// Name is the name of the plugin
+	Name string `json:"name"`
+
+	// Version is a SemVer 2 version of the plugin.
+	Version string `json:"version"`
+
+	// Usage is the single-line usage text shown in help
+	Usage string `json:"usage"`
+
+	// Description is a long description shown in places like `helm help`
+	Description string `json:"description"`
+
+	// PlatformCommand is the plugin command, with a platform selector and support for args.
+	//
+	// The command and args will be passed through environment expansion, so env vars can
+	// be present in this command. Unless IgnoreFlags is set, this will
+	// also merge the flags passed from Helm.
+	//
+	// Note that the command is not executed in a shell. To do so, we suggest
+	// pointing the command to a shell script.
+	//
+	// The following rules will apply to processing platform commands:
+	// - If PlatformCommand is present, it will be used
+	// - If both OS and Arch match the current platform, search will stop and the command will be executed
+	// - If OS matches and Arch is empty, the command will be executed
+	// - If no OS/Arch match is found, the default command will be executed
+	// - If no matches are found in platformCommand, Helm will exit with an error
+	PlatformCommand []PlatformCommand `json:"platformCommand"`
+
+	// Command is the plugin command, as a single string.
+	// Providing a command will result in an error if PlatformCommand is also set.
+	//
+	// The command will be passed through environment expansion, so env vars can
+	// be present in this command. Unless IgnoreFlags is set, this will
+	// also merge the flags passed from Helm.
+	//
+	// Note that command is not executed in a shell. To do so, we suggest
+	// pointing the command to a shell script.
+	//
+	// DEPRECATED: Use PlatformCommand instead. Remove in Helm 4.
+	Command string `json:"command"`
+
+	// IgnoreFlags ignores any flags passed in from Helm
+	//
+	// For example, if the plugin is invoked as `helm --debug myplugin`, if this
+	// is false, `--debug` will be appended to `--command`. If this is true,
+	// the `--debug` flag will be discarded.
+	IgnoreFlags bool `json:"ignoreFlags"`
+
+	// PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+	//
+	// The command and args will be passed through environment expansion, so env vars can
+	// be present in the command.
+	//
+	// Note that the command is not executed in a shell. To do so, we suggest
+	// pointing the command to a shell script.
+	//
+	// The following rules will apply to processing platform hooks:
+	// - If PlatformHooks is present, it will be used
+	// - If both OS and Arch match the current platform, search will stop and the command will be executed
+	// - If OS matches and Arch is empty, the command will be executed
+	// - If no OS/Arch match is found, the default command will be executed
+	// - If no matches are found in platformHooks, Helm will skip the event
+	PlatformHooks PlatformHooks `json:"platformHooks"`
+
+	// Hooks are commands that will run on plugin events, as a single string.
+	// Providing hooks will result in an error if PlatformHooks is also set.
+	//
+	// The command will be passed through environment expansion, so env vars can
+	// be present in this command.
+	//
+	// Note that the command is executed in the sh shell.
+	//
+	// DEPRECATED: Use PlatformHooks instead. Remove in Helm 4.
+	Hooks Hooks
+
+	// Downloaders field is used if the plugin supplies a downloader mechanism
+	// for special protocols.
+	Downloaders []Downloaders `json:"downloaders"`
+
+	// UseTunnelDeprecated indicates that this command needs a tunnel.
+	// Setting this will cause a number of side effects, such as the
+	// automatic setting of HELM_HOST.
+	// DEPRECATED and unused, but retained for backwards compatibility with Helm 2 plugins. Remove in Helm 4
+	UseTunnelDeprecated bool `json:"useTunnel,omitempty"`
+}
+
+// Plugin represents a plugin.
+type Plugin struct {
+	// Metadata is a parsed representation of a plugin.yaml
+	Metadata *Metadata
+	// Dir is the string path to the directory that holds the plugin.
+	Dir string
+}
+
+// getPlatformCommand returns command and args strings based on the following
+// rules, in priority order:
+// - From the PlatformCommand where OS and Arch match the current platform
+// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified
+// - From the PlatformCommand where OS and Arch are both empty/unspecified
+// - Return nil, nil
+//
+// Note that an entry which sets Arch but leaves OS empty is never selected: a
+// set Arch is only honored together with an exact OS/Arch match.
+func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) {
+	var command, args []string
+	found := false
+	foundOs := false
+
+	eq := strings.EqualFold
+	for _, c := range cmds {
+		if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) {
+			// Return early for an exact match
+			return strings.Split(c.Command, " "), c.Args
+		}
+
+		if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 {
+			// Skip if OS is set but does not match, or if Arch is set: a set
+			// Arch requires the exact OS/Arch match handled above.
+			continue
+		}
+
+		if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) {
+			// First OS match with empty arch, can only be overridden by a direct match
+			command = strings.Split(c.Command, " ")
+			args = c.Args
+			found = true
+			foundOs = true
+		} else if !found {
+			// First empty match, can be overridden by a direct match or an OS match
+			command = strings.Split(c.Command, " ")
+			args = c.Args
+			found = true
+		}
+	}
+
+	return command, args
+}
+
+// PrepareCommands takes a []PlatformCommand
+// and prepares the command and arguments for execution.
+//
+// It merges extraArgs into any arguments supplied in the plugin. It
+// returns the main command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) {
+	cmdParts, args := getPlatformCommand(cmds)
+	if len(cmdParts) == 0 || cmdParts[0] == "" {
+		return "", nil, fmt.Errorf("no plugin command is applicable")
+	}
+
+	main := os.ExpandEnv(cmdParts[0])
+	baseArgs := []string{}
+	if len(cmdParts) > 1 {
+		for _, cmdPart := range cmdParts[1:] {
+			if expandArgs {
+				baseArgs = append(baseArgs, os.ExpandEnv(cmdPart))
+			} else {
+				baseArgs = append(baseArgs, cmdPart)
+			}
+		}
+	}
+
+	for _, arg := range args {
+		if expandArgs {
+			baseArgs = append(baseArgs, os.ExpandEnv(arg))
+		} else {
+			baseArgs = append(baseArgs, arg)
+		}
+	}
+
+	if len(extraArgs) > 0 {
+		baseArgs = append(baseArgs, extraArgs...)
+	}
+
+	return main, baseArgs, nil
+}
+
+// PrepareCommand gets the correct command and arguments for a plugin.
+//
+// It merges extraArgs into any arguments supplied in the plugin. It returns the name of the command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) {
+	var extraArgsIn []string
+
+	if !p.Metadata.IgnoreFlags {
+		extraArgsIn = extraArgs
+	}
+
+	cmds := p.Metadata.PlatformCommand
+	if len(cmds) == 0 && len(p.Metadata.Command) > 0 {
+		cmds = []PlatformCommand{{Command: p.Metadata.Command}}
+	}
+
+	return PrepareCommands(cmds, true, extraArgsIn)
+}
+
+// validPluginName is a regular expression that validates plugin names.
+//
+// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
+var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
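+
+// A minimal sketch of resolving and running a plugin command (the metadata
+// values and paths are illustrative):
+//
+//	p := &plugin.Plugin{Metadata: &plugin.Metadata{
+//		Name: "example",
+//		PlatformCommand: []plugin.PlatformCommand{
+//			{OperatingSystem: "linux", Architecture: "amd64", Command: "$HELM_PLUGIN_DIR/bin/example"},
+//			{Command: "$HELM_PLUGIN_DIR/bin/example"}, // fallback for any platform
+//		},
+//	}}
+//	main, argv, err := p.PrepareCommand([]string{"--verbose"})
+//	if err == nil {
+//		_ = exec.Command(main, argv...).Run()
+//	}
+
+// validatePluginData validates a plugin's YAML data.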
+func validatePluginData(plug *Plugin, filepath string) error {
+	// If the metadata section is missing, initialize it with empty data.
+	if plug.Metadata == nil {
+		plug.Metadata = &Metadata{}
+	}
+	if !validPluginName.MatchString(plug.Metadata.Name) {
+		return fmt.Errorf("invalid plugin name at %q", filepath)
+	}
+	plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
+
+	if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 {
+		return fmt.Errorf("both platformCommand and command are set in %q", filepath)
+	}
+
+	if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 {
+		return fmt.Errorf("both platformHooks and hooks are set in %q", filepath)
+	}
+
+	// We could also validate SemVer, executable, and other fields should we so choose.
+	return nil
+}
+
+// sanitizeString normalizes spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+	return strings.Map(func(r rune) rune {
+		if unicode.IsSpace(r) {
+			return ' '
+		}
+		if unicode.IsPrint(r) {
+			return r
+		}
+		return -1
+	}, str)
+}
+
+func detectDuplicates(plugs []*Plugin) error {
+	names := map[string]string{}
+
+	for _, plug := range plugs {
+		if oldpath, ok := names[plug.Metadata.Name]; ok {
+			return fmt.Errorf(
+				"two plugins claim the name %q at %q and %q",
+				plug.Metadata.Name,
+				oldpath,
+				plug.Dir,
+			)
+		}
+		names[plug.Metadata.Name] = plug.Dir
+	}
+
+	return nil
+}
+
+// LoadDir loads a plugin from the given directory.
+func LoadDir(dirname string) (*Plugin, error) {
+	pluginfile := filepath.Join(dirname, PluginFileName)
+	data, err := os.ReadFile(pluginfile)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to read plugin at %q", pluginfile)
+	}
+
+	plug := &Plugin{Dir: dirname}
+	if err := yaml.UnmarshalStrict(data, &plug.Metadata); err != nil {
+		return nil, errors.Wrapf(err, "failed to load plugin at %q", pluginfile)
+	}
+	return plug, validatePluginData(plug, pluginfile)
+}
+
+// LoadAll loads all plugins found beneath the base directory.
+//
+// This scans only one directory level.
+func LoadAll(basedir string) ([]*Plugin, error) {
+	plugins := []*Plugin{}
+	// We want basedir/*/plugin.yaml
+	scanpath := filepath.Join(basedir, "*", PluginFileName)
+	matches, err := filepath.Glob(scanpath)
+	if err != nil {
+		return plugins, errors.Wrapf(err, "failed to find plugins in %q", scanpath)
+	}
+
+	if matches == nil {
+		return plugins, nil
+	}
+
+	for _, pluginfile := range matches {
+		dir := filepath.Dir(pluginfile)
+		p, err := LoadDir(dir)
+		if err != nil {
+			return plugins, err
+		}
+		plugins = append(plugins, p)
+	}
+	return plugins, detectDuplicates(plugins)
+}
+
+// FindPlugins returns the plugins found under the given list of plugin directories.
+func FindPlugins(plugdirs string) ([]*Plugin, error) {
+	found := []*Plugin{}
+	// Let's get all UNIXy and allow path separators
+	for _, p := range filepath.SplitList(plugdirs) {
+		matches, err := LoadAll(p)
+		if err != nil {
+			return matches, err
+		}
+		found = append(found, matches...)
+	}
+	return found, nil
+}
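+
+// A minimal plugin.yaml that LoadDir can parse might look as follows (all
+// values are illustrative):
+//
+//	name: "example"
+//	version: "0.1.0"
+//	usage: "print a greeting"
+//	description: "An example Helm plugin"
+//	platformCommand:
+//	  - os: linux
+//	    arch: amd64
+//	    command: "$HELM_PLUGIN_DIR/bin/example"
+//	  - command: "$HELM_PLUGIN_DIR/bin/example"
+
+// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because
+// the plugin subsystem itself needs access to the environment variables
+// created here.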
+func SetupPluginEnv(settings *cli.EnvSettings, name, base string) {
+	env := settings.EnvVars()
+	env["HELM_PLUGIN_NAME"] = name
+	env["HELM_PLUGIN_DIR"] = base
+	for key, val := range env {
+		os.Setenv(key, val)
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/exec.go b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
new file mode 100644
index 000000000000..167e737d65c7
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
@@ -0,0 +1,109 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrender
+
+import (
+	"bytes"
+	"io"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+type execRender struct {
+	binaryPath string
+	args       []string
+}
+
+// NewExec returns a PostRenderer implementation that calls the provided binary.
+// It returns an error if the binary cannot be found. If the path does not
+// contain any separators, it will search in $PATH, otherwise it will resolve
+// any relative paths to a fully qualified path
+func NewExec(binaryPath string, args ...string) (PostRenderer, error) {
+	fullPath, err := getFullPath(binaryPath)
+	if err != nil {
+		return nil, err
+	}
+	return &execRender{fullPath, args}, nil
+}
+
+// Run the configured binary for the post render
+func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+	cmd := exec.Command(p.binaryPath, p.args...)
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	var postRendered = &bytes.Buffer{}
+	var stderr = &bytes.Buffer{}
+	cmd.Stdout = postRendered
+	cmd.Stderr = stderr
+
+	go func() {
+		defer stdin.Close()
+		io.Copy(stdin, renderedManifests)
+	}()
+	err = cmd.Run()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error while running command %s. error output:\n%s", p.binaryPath, stderr.String())
+	}
+
+	return postRendered, nil
+}
+
+// getFullPath returns the full filepath to the binary to execute. If the path
+// does not contain any separators, it will search in $PATH, otherwise it will
+// resolve any relative paths to a fully qualified path
+func getFullPath(binaryPath string) (string, error) {
+	// NOTE(thomastaylor312): I am leaving this code commented out here. During
+	// the implementation of post-render, it was brought up that if we are
+	// relying on plugins, we should actually use the plugin system so it can
+	// properly handle multiple OSs. This will be a feature add in the future,
+	// so I left this code for reference. It can be deleted or reused once the
+	// feature is implemented
+
+	// Manually check the plugin dir first
+	// if !strings.Contains(binaryPath, string(filepath.Separator)) {
+	// 	// First check the plugin dir
+	// 	pluginDir := helmpath.DataPath("plugins") // Default location
+	// 	// If location for plugins is explicitly set, check there
+	// 	if v, ok := os.LookupEnv("HELM_PLUGINS"); ok {
+	// 		pluginDir = v
+	// 	}
+	// 	// The plugins variable can actually contain multiple paths, so loop through those
+	// 	for _, p := range filepath.SplitList(pluginDir) {
+	// 		_, err := os.Stat(filepath.Join(p, binaryPath))
+	// 		if err != nil && !os.IsNotExist(err) {
+	// 			return "", err
+	// 		} else if err == nil {
+	// 			binaryPath = filepath.Join(p, binaryPath)
+	// 			break
+	// 		}
+	// 	}
+	// }
+
+	// Now check for the binary using the given path or check if it exists in
+	// the path and is executable
+	checkedPath, err := exec.LookPath(binaryPath)
+	if err != nil {
+		return "", errors.Wrapf(err, "unable to find binary at %s", binaryPath)
+	}
+
+	return filepath.Abs(checkedPath)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go
new file mode 100644
index 000000000000..3af384290771
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package postrender contains an interface that can be implemented for custom
+// post-renderers and an exec implementation that can be used for arbitrary
+// binaries and scripts
+package postrender
+
+import "bytes"
+
+// PostRenderer is implemented by anything that can transform rendered
+// manifests before Helm applies them.
+type PostRenderer interface {
+	// Run expects a single buffer filled with Helm rendered manifests. It
+	// expects the modified results to be returned on a separate buffer or an
+	// error if there was an issue or failure while running the post render step
+	Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/doc.go b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
new file mode 100644
index 000000000000..0c7ae0618035
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package provenance provides tools for establishing the authenticity of a chart.
+
+In Helm, provenance is established via several factors. The primary factor is the
+cryptographic signature of a chart. Chart authors may sign charts, which in turn
+provide the necessary metadata to ensure the integrity of the chart file, the
+Chart.yaml, and the referenced Docker images.
+
+A provenance file is clear-signed. This provides cryptographic verification that
+a particular block of information (Chart.yaml, archive file, images) has not
+been tampered with or altered. To learn more, read the GnuPG documentation on
+clear signatures:
+https://www.gnupg.org/gph/en/manual/x135.html
+
+The cryptography used by Helm should be compatible with OpenPGP. For example,
+you should be able to verify a signature by importing the desired public key
+and using `gpg --verify`, `keybase pgp verify`, or similar:
+
+	$ gpg --verify some.sig
+	gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762
+	gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) " [ultimate]
+*/
+package provenance // import "helm.sh/helm/v3/pkg/provenance"
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/sign.go b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
new file mode 100644
index 000000000000..7f89ef3f53ea
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
@@ -0,0 +1,427 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provenance
+
+import (
+	"bytes"
+	"crypto"
+	"encoding/hex"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/openpgp"           //nolint
+	"golang.org/x/crypto/openpgp/clearsign" //nolint
+	"golang.org/x/crypto/openpgp/packet"    //nolint
+	"sigs.k8s.io/yaml"
+
+	hapi "helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+var defaultPGPConfig = packet.Config{
+	DefaultHash: crypto.SHA512,
+}
+
+// SumCollection represents a collection of file and image checksums.
+//
+// Files are of the form:
+//
+//	FILENAME: "sha256:SUM"
+//
+// Images are of the form:
+//
+//	"IMAGE:TAG": "sha256:SUM"
+//
+// Docker optionally supports sha512, and if this is the case, the hash marker
+// will be 'sha512' instead of 'sha256'.
+type SumCollection struct {
+	Files  map[string]string `json:"files"`
+	Images map[string]string `json:"images,omitempty"`
+}
+
+// Verification contains information about a verification operation.
+type Verification struct {
+	// SignedBy contains the entity that signed a chart.
+	SignedBy *openpgp.Entity
+	// FileHash is the hash, prepended with the scheme, for the file that was verified.
+	FileHash string
+	// FileName is the name of the file that FileHash verifies.
+	FileName string
+}
+
+// Signatory signs things.
+//
+// Signatories can be constructed from a PGP private key file using NewFromFiles
+// or they can be constructed manually by setting the Entity to a valid
+// PGP entity.
+//
+// The same Signatory can be used to sign or validate multiple charts.
+type Signatory struct {
+	// The signatory for this instance of Helm. This is used for signing.
+	Entity *openpgp.Entity
+	// The keyring for this instance of Helm. This is used for verification.
+	KeyRing openpgp.EntityList
+}
+
+// NewFromFiles constructs a new Signatory from the PGP key in the given filename.
+//
+// This will emit an error if it cannot find a valid GPG keyfile (entity) at the
+// given location.
+//
+// Note that the keyfile may have just a public key, just a private key, or
+// both. The Signatory methods may have different requirements of the keys. For
+// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it
+// can sign something.
+func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) {
+	e, err := loadKey(keyfile)
+	if err != nil {
+		return nil, err
+	}
+
+	ring, err := loadKeyRing(keyringfile)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Signatory{
+		Entity:  e,
+		KeyRing: ring,
+	}, nil
+}
+
+// NewFromKeyring reads a keyring file and creates a Signatory.
+//
+// If id is not the empty string, this will also try to find an Entity in the
+// keyring whose name matches, and set that as the signing entity. It will return
+// an error if the id is not empty and also not found.
+func NewFromKeyring(keyringfile, id string) (*Signatory, error) {
+	ring, err := loadKeyRing(keyringfile)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Signatory{KeyRing: ring}
+
+	// If the ID is empty, we can return now.
+	if id == "" {
+		return s, nil
+	}
+
+	// We're gonna go all GnuPG on this and look for a string that _contains_. If
+	// two or more keys contain the string and none are a direct match, we error
+	// out.
+	var candidate *openpgp.Entity
+	vague := false
+	for _, e := range ring {
+		for n := range e.Identities {
+			if n == id {
+				s.Entity = e
+				return s, nil
+			}
+			if strings.Contains(n, id) {
+				if candidate != nil {
+					vague = true
+				}
+				candidate = e
+			}
+		}
+	}
+	if vague {
+		return s, errors.Errorf("more than one key contains the id %q", id)
+	}
+
+	s.Entity = candidate
+	return s, nil
+}
+
+// PassphraseFetcher returns a passphrase for decrypting keys.
+//
+// This is used as a callback to read a passphrase from some other location. The
+// given name is the Name field on the key, typically of the form:
+//
+//	USER_NAME (COMMENT)
+type PassphraseFetcher func(name string) ([]byte, error)
+
+// DecryptKey decrypts a private key in the Signatory.
+//
+// If the key is not encrypted, this will return without error.
+//
+// If the key does not exist, this will return an error.
+//
+// If the key exists, but cannot be unlocked with the passphrase returned by
+// the PassphraseFetcher, this will return an error.
+//
+// If the key is successfully unlocked, it will return nil.
+func (s *Signatory) DecryptKey(fn PassphraseFetcher) error {
+	if s.Entity == nil {
+		return errors.New("private key not found")
+	} else if s.Entity.PrivateKey == nil {
+		return errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+	}
+
+	// Nothing else to do if key is not encrypted.
+	if !s.Entity.PrivateKey.Encrypted {
+		return nil
+	}
+
+	fname := "Unknown"
+	for i := range s.Entity.Identities {
+		if i != "" {
+			fname = i
+			break
+		}
+	}
+
+	p, err := fn(fname)
+	if err != nil {
+		return err
+	}
+
+	return s.Entity.PrivateKey.Decrypt(p)
+}
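+
+// A minimal signing and verification sketch (key, keyring, and chart paths
+// are illustrative):
+//
+//	s, err := provenance.NewFromFiles("signing.key", "pubring.gpg")
+//	if err == nil {
+//		sig, _ := s.ClearSign("mychart-0.1.0.tgz")
+//		_ = os.WriteFile("mychart-0.1.0.tgz.prov", []byte(sig), 0644)
+//		ver, _ := s.Verify("mychart-0.1.0.tgz", "mychart-0.1.0.tgz.prov")
+//		fmt.Println(ver.FileHash)
+//	}
+
+// ClearSign signs a chart with the given key.
+//
+// This takes the path to a chart archive file and a key, and it returns a clear signature.
+//
+// The Signatory must have a valid Entity.PrivateKey for this to work. If it does
+// not, an error will be returned.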
+func (s *Signatory) ClearSign(chartpath string) (string, error) {
+	if s.Entity == nil {
+		return "", errors.New("private key not found")
+	} else if s.Entity.PrivateKey == nil {
+		return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+	}
+
+	if fi, err := os.Stat(chartpath); err != nil {
+		return "", err
+	} else if fi.IsDir() {
+		return "", errors.New("cannot sign a directory")
+	}
+
+	out := bytes.NewBuffer(nil)
+
+	b, err := messageBlock(chartpath)
+	if err != nil {
+		return "", err
+	}
+
+	// Sign the buffer
+	w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig)
+	if err != nil {
+		return "", err
+	}
+
+	_, err = io.Copy(w, b)
+
+	if err != nil {
+		// NB: We intentionally don't call `w.Close()` here! `w.Close()` is the method which
+		// actually does the PGP signing, and therefore is the part which uses the private key.
+		// In other words, if we call Close here, there's a risk that there's an attempt to use the
+		// private key to sign garbage data (since we know that io.Copy failed, `w` won't contain
+		// anything useful).
+		return "", errors.Wrap(err, "failed to write to clearsign encoder")
+	}
+
+	err = w.Close()
+	if err != nil {
+		return "", errors.Wrap(err, "failed to either sign or armor message block")
+	}
+
+	return out.String(), nil
+}
+
+// Verify checks a signature and verifies that it is legit for a chart.
+func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) {
+	ver := &Verification{}
+	for _, fname := range []string{chartpath, sigpath} {
+		if fi, err := os.Stat(fname); err != nil {
+			return ver, err
+		} else if fi.IsDir() {
+			return ver, errors.Errorf("%s cannot be a directory", fname)
+		}
+	}
+
+	// First verify the signature
+	sig, err := s.decodeSignature(sigpath)
+	if err != nil {
+		return ver, errors.Wrap(err, "failed to decode signature")
+	}
+
+	by, err := s.verifySignature(sig)
+	if err != nil {
+		return ver, err
+	}
+	ver.SignedBy = by
+
+	// Second, verify the hash of the tarball.
+	sum, err := DigestFile(chartpath)
+	if err != nil {
+		return ver, err
+	}
+	_, sums, err := parseMessageBlock(sig.Plaintext)
+	if err != nil {
+		return ver, err
+	}
+
+	sum = "sha256:" + sum
+	basename := filepath.Base(chartpath)
+	if sha, ok := sums.Files[basename]; !ok {
+		return ver, errors.Errorf("provenance does not contain a SHA for a file named %q", basename)
+	} else if sha != sum {
+		return ver, errors.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum)
+	}
+	ver.FileHash = sum
+	ver.FileName = basename
+
+	// TODO: when image signing is added, verify that here.
+
+	return ver, nil
+}
+
+func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) {
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	block, _ := clearsign.Decode(data)
+	if block == nil {
+		// There was no sig in the file.
+		return nil, errors.New("signature block not found")
+	}
+
+	return block, nil
+}
+
+// verifySignature verifies that the given block is validly signed, and returns the signer.
+func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) {
+	return openpgp.CheckDetachedSignature(
+		s.KeyRing,
+		bytes.NewBuffer(block.Bytes),
+		block.ArmoredSignature.Body,
+	)
+}
+
+func messageBlock(chartpath string) (*bytes.Buffer, error) {
+	var b *bytes.Buffer
+	// Checksum the archive
+	chash, err := DigestFile(chartpath)
+	if err != nil {
+		return b, err
+	}
+
+	base := filepath.Base(chartpath)
+	sums := &SumCollection{
+		Files: map[string]string{
+			base: "sha256:" + chash,
+		},
+	}
+
+	// Load the archive into memory.
+	chart, err := loader.LoadFile(chartpath)
+	if err != nil {
+		return b, err
+	}
+
+	// Buffer a hash + checksums YAML file
+	data, err := yaml.Marshal(chart.Metadata)
+	if err != nil {
+		return b, err
+	}
+
+	// FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP
+	// clearsign block. So we use ...\n, which is the YAML document end marker.
+	// http://yaml.org/spec/1.2/spec.html#id2800168
+	b = bytes.NewBuffer(data)
+	b.WriteString("\n...\n")
+
+	data, err = yaml.Marshal(sums)
+	if err != nil {
+		return b, err
+	}
+	b.Write(data)
+
+	return b, nil
+}
+
+// parseMessageBlock splits a signed message block into the chart metadata and
+// the checksum collection.
+func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) {
+	// This sucks.
+	parts := bytes.Split(data, []byte("\n...\n"))
+	if len(parts) < 2 {
+		return nil, nil, errors.New("message block must have at least two parts")
+	}
+
+	md := &hapi.Metadata{}
+	sc := &SumCollection{}
+
+	if err := yaml.Unmarshal(parts[0], md); err != nil {
+		return md, sc, err
+	}
+	err := yaml.Unmarshal(parts[1], sc)
+	return md, sc, err
+}
+
+// loadKey loads a GPG key found at a particular path.
+func loadKey(keypath string) (*openpgp.Entity, error) {
+	f, err := os.Open(keypath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	pr := packet.NewReader(f)
+	return openpgp.ReadEntity(pr)
+}
+
+func loadKeyRing(ringpath string) (openpgp.EntityList, error) {
+	f, err := os.Open(ringpath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return openpgp.ReadKeyRing(f)
+}
+
+// DigestFile calculates a SHA256 hash (like Docker) for a given file.
+//
+// It takes the path to the archive file, and returns a string representation of
+// the SHA256 sum.
+//
+// The intended use of this function is to generate a sum of a chart TGZ file.
+func DigestFile(filename string) (string, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return Digest(f)
+}
+
+// Digest hashes a reader and returns a SHA256 digest.
+//
+// Helm uses SHA256 as its default hash for all non-cryptographic applications.
+func Digest(in io.Reader) (string, error) {
+	hash := crypto.SHA256.New()
+	if _, err := io.Copy(hash, in); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/doc.go b/vendor/helm.sh/helm/v3/pkg/pusher/doc.go
new file mode 100644
index 000000000000..df89ab112b53
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package pusher provides a generalized tool for uploading data by scheme.
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package pusher
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go b/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
new file mode 100644
index 000000000000..33296aaddc17
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
@@ -0,0 +1,157 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/internal/tlsutil"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/registry"
+	"helm.sh/helm/v3/pkg/time/ctime"
+)
+
+// OCIPusher is the default OCI backend handler
+type OCIPusher struct {
+	opts options
+}
+
+// Push performs a Push from repo.Pusher.
+func (pusher *OCIPusher) Push(chartRef, href string, options ...Option) error {
+	for _, opt := range options {
+		opt(&pusher.opts)
+	}
+	return pusher.push(chartRef, href)
+}
+
+func (pusher *OCIPusher) push(chartRef, href string) error {
+	stat, err := os.Stat(chartRef)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return errors.Errorf("%s: no such file", chartRef)
+		}
+		return err
+	}
+	if stat.IsDir() {
+		return errors.New("cannot push directory, must provide chart archive (.tgz)")
+	}
+
+	meta, err := loader.Load(chartRef)
+	if err != nil {
+		return err
+	}
+
+	client := pusher.opts.registryClient
+	if client == nil {
+		c, err := pusher.newRegistryClient()
+		if err != nil {
+			return err
+		}
+		client = c
+	}
+
+	chartBytes, err := os.ReadFile(chartRef)
+	if err != nil {
+		return err
+	}
+
+	var pushOpts []registry.PushOption
+	provRef := fmt.Sprintf("%s.prov", chartRef)
+	if _, err := os.Stat(provRef); err == nil {
+		provBytes, err := os.ReadFile(provRef)
+		if err != nil {
+			return err
+		}
+		pushOpts = append(pushOpts, registry.PushOptProvData(provBytes))
+	}
+
+	ref := fmt.Sprintf("%s:%s",
+		path.Join(strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)), meta.Metadata.Name),
+		meta.Metadata.Version)
+
+	// The time the chart was "created" is semantically the time the chart
+	// archive file was last written (modified).
+	chartArchiveFileCreatedTime := ctime.Modified(stat)
+	pushOpts = append(pushOpts, registry.PushOptCreationTime(chartArchiveFileCreatedTime.Format(time.RFC3339)))
+
+	_, err = client.Push(chartBytes, ref, pushOpts...)
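+	// For example (values illustrative), pushing mychart-4.2.0.tgz with href
+	// "oci://registry.example.com/charts" produces the reference
+	// "registry.example.com/charts/mychart:4.2.0" above; a "+" in the version
+	// is stored as "_" by the registry client.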
+	return err
+}
+
+// NewOCIPusher constructs a valid OCI client as a Pusher
+func NewOCIPusher(ops ...Option) (Pusher, error) {
+	var client OCIPusher
+
+	for _, opt := range ops {
+		opt(&client.opts)
+	}
+
+	return &client, nil
+}
+
+func (pusher *OCIPusher) newRegistryClient() (*registry.Client, error) {
+	if (pusher.opts.certFile != "" && pusher.opts.keyFile != "") || pusher.opts.caFile != "" || pusher.opts.insecureSkipTLSverify {
+		tlsConf, err := tlsutil.NewClientTLS(pusher.opts.certFile, pusher.opts.keyFile, pusher.opts.caFile, pusher.opts.insecureSkipTLSverify)
+		if err != nil {
+			return nil, errors.Wrap(err, "can't create TLS config for client")
+		}
+
+		registryClient, err := registry.NewClient(
+			registry.ClientOptHTTPClient(&http.Client{
+				// From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87
+				Transport: &http.Transport{
+					Proxy: http.ProxyFromEnvironment,
+					DialContext: (&net.Dialer{
+						// By default we wrap the transport in retries, so reduce the
+						// default dial timeout to 5s to avoid 5x 30s of connection
+						// timeouts when doing the "ping" on certain http registries.
+						Timeout:   5 * time.Second,
+						KeepAlive: 30 * time.Second,
+					}).DialContext,
+					ForceAttemptHTTP2:     true,
+					MaxIdleConns:          100,
+					IdleConnTimeout:       90 * time.Second,
+					TLSHandshakeTimeout:   10 * time.Second,
+					ExpectContinueTimeout: 1 * time.Second,
+					TLSClientConfig:       tlsConf,
+				},
+			}),
+			registry.ClientOptEnableCache(true),
+		)
+		if err != nil {
+			return nil, err
+		}
+		return registryClient, nil
+	}
+
+	// Note that the plainHTTP option is only honored on this non-TLS path.
+	opts := []registry.ClientOption{registry.ClientOptEnableCache(true)}
+	if pusher.opts.plainHTTP {
+		opts = append(opts, registry.ClientOptPlainHTTP())
+	}
+
+	registryClient, err := registry.NewClient(opts...)
+	if err != nil {
+		return nil, err
+	}
+	return registryClient, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go
new file mode 100644
index 000000000000..5b8a9160f012
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/registry"
+)
+
+// options are generic parameters to be provided to the pusher during instantiation.
+//
+// Pushers may or may not ignore these parameters as they are passed in.
+type options struct {
+	registryClient        *registry.Client
+	certFile              string
+	keyFile               string
+	caFile                string
+	insecureSkipTLSverify bool
+	plainHTTP             bool
+}
+
+// Option allows specifying various settings configurable by the user for overriding the defaults
+// used when performing Push operations with the Pusher.
+type Option func(*options)
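+
+// A small usage sketch (the registry host is illustrative): construct a pusher
+// and upload a chart archive to an OCI registry.
+//
+//	p, err := pusher.NewOCIPusher(pusher.WithPlainHTTP(true))
+//	if err == nil {
+//		err = p.Push("mychart-0.1.0.tgz", "oci://localhost:5000/charts")
+//	}
+
+// WithRegistryClient sets the registryClient option.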
+func WithRegistryClient(client *registry.Client) Option { + return func(opts *options) { + opts.registryClient = client + } +} + +// WithTLSClientConfig sets the client auth with the provided credentials. +func WithTLSClientConfig(certFile, keyFile, caFile string) Option { + return func(opts *options) { + opts.certFile = certFile + opts.keyFile = keyFile + opts.caFile = caFile + } +} + +// WithInsecureSkipTLSVerify determines if a TLS Certificate will be checked +func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option { + return func(opts *options) { + opts.insecureSkipTLSverify = insecureSkipTLSVerify + } +} + +func WithPlainHTTP(plainHTTP bool) Option { + return func(opts *options) { + opts.plainHTTP = plainHTTP + } +} + +// Pusher is an interface to support upload to the specified URL. +type Pusher interface { + // Push file content by url string + Push(chartRef, url string, options ...Option) error +} + +// Constructor is the function for every pusher which creates a specific instance +// according to the configuration +type Constructor func(options ...Option) (Pusher, error) + +// Provider represents any pusher and the schemes that it supports. +type Provider struct { + Schemes []string + New Constructor +} + +// Provides returns true if the given scheme is supported by this Provider. +func (p Provider) Provides(scheme string) bool { + for _, i := range p.Schemes { + if i == scheme { + return true + } + } + return false +} + +// Providers is a collection of Provider objects. +type Providers []Provider + +// ByScheme returns a Provider that handles the given scheme. +// +// If no provider handles this scheme, this will return an error. +func (p Providers) ByScheme(scheme string) (Pusher, error) { + for _, pp := range p { + if pp.Provides(scheme) { + return pp.New() + } + } + return nil, errors.Errorf("scheme %q not supported", scheme) +} + +var ociProvider = Provider{ + Schemes: []string{registry.OCIScheme}, + New: NewOCIPusher, +} + +// All finds all of the registered pushers as a list of Provider instances. +// Currently, just the built-in pushers are collected. +func All(_ *cli.EnvSettings) Providers { + result := Providers{ociProvider} + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/registry/client.go b/vendor/helm.sh/helm/v3/pkg/registry/client.go new file mode 100644 index 000000000000..de7e636e162f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/registry/client.go @@ -0,0 +1,896 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package registry // import "helm.sh/helm/v3/pkg/registry"
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/Masterminds/semver/v3"
+	"github.com/containerd/containerd/remotes"
+	"github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"oras.land/oras-go/v2"
+	"oras.land/oras-go/v2/content"
+	"oras.land/oras-go/v2/content/memory"
+	"oras.land/oras-go/v2/registry"
+	"oras.land/oras-go/v2/registry/remote"
+	"oras.land/oras-go/v2/registry/remote/auth"
+	"oras.land/oras-go/v2/registry/remote/credentials"
+	"oras.land/oras-go/v2/registry/remote/retry"
+
+	"helm.sh/helm/v3/internal/version"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/helmpath"
+)
+
+// See https://github.com/helm/helm/issues/10166
+const registryUnderscoreMessage = `
+OCI artifact references (e.g. tags) do not support the plus sign (+). To support
+storing semantic versions, Helm adopts the convention of changing plus (+) to
+an underscore (_) in chart version tags when pushing to a registry and back to
+a plus (+) when pulling from a registry.`
+
+var errDeprecatedRemote = errors.New("providing github.com/containerd/containerd/remotes.Resolver via ClientOptResolver is no longer supported")
+
+type (
+	// RemoteClient shadows the ORAS remote.Client interface
+	// (hiding the ORAS type from Helm client visibility)
+	// https://pkg.go.dev/oras.land/oras-go/pkg/registry/remote#Client
+	RemoteClient interface {
+		Do(req *http.Request) (*http.Response, error)
+	}
+
+	// Client works with OCI-compliant registries
+	Client struct {
+		debug       bool
+		enableCache bool
+		// path to repository config file e.g. ~/.docker/config.json
+		credentialsFile    string
+		username           string
+		password           string
+		out                io.Writer
+		authorizer         *auth.Client
+		registryAuthorizer RemoteClient
+		credentialsStore   credentials.Store
+		httpClient         *http.Client
+		plainHTTP          bool
+		err                error // pass any errors from the ClientOption functions
+	}
+
+	// ClientOption allows specifying various settings configurable by the user for overriding the defaults
+	// used when creating a new default client
+	ClientOption func(*Client)
+)
+
+// NewClient returns a new registry client with config
+func NewClient(options ...ClientOption) (*Client, error) {
+	client := &Client{
+		out: io.Discard,
+	}
+	for _, option := range options {
+		option(client)
+		if client.err != nil {
+			return nil, client.err
+		}
+	}
+	if client.credentialsFile == "" {
+		client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename)
+	}
+	if client.httpClient == nil {
+		transport := newTransport()
+		client.httpClient = &http.Client{
+			Transport: retry.NewTransport(transport),
+		}
+	}
+
+	storeOptions := credentials.StoreOptions{
+		AllowPlaintextPut:        true,
+		DetectDefaultNativeStore: true,
+	}
+	store, err := credentials.NewStore(client.credentialsFile, storeOptions)
+	if err != nil {
+		return nil, err
+	}
+	dockerStore, err := credentials.NewStoreFromDocker(storeOptions)
+	if err != nil {
+		// should only fail if user home directory can't be determined
+		client.credentialsStore = store
+	} else {
+		// use Helm credentials with fallback to Docker
+		client.credentialsStore = credentials.NewStoreWithFallbacks(store, dockerStore)
+	}
+
+	if client.authorizer == nil {
+		authorizer := auth.Client{
+			Client: client.httpClient,
+		}
+		authorizer.SetUserAgent(version.GetUserAgent())
+
+		authorizer.Credential = credentials.Credential(client.credentialsStore)
+
+		if client.enableCache {
+			authorizer.Cache = auth.NewCache()
+		}
+
+		client.authorizer = &authorizer
+	}
+
+	return client, nil
+}
+
+// ClientOptDebug returns a function that sets the debug setting on client options set
+func ClientOptDebug(debug bool) ClientOption {
+	return func(client *Client) {
+		client.debug = debug
+	}
+}
+
+// ClientOptEnableCache returns a function that sets the enableCache setting on a client options set
+func ClientOptEnableCache(enableCache bool) ClientOption {
+	return func(client *Client) {
+		client.enableCache = enableCache
+	}
+}
+
+// ClientOptBasicAuth returns a function that sets the username and password setting on client options set
+func ClientOptBasicAuth(username, password string) ClientOption {
+	return func(client *Client) {
+		client.username = username
+		client.password = password
+	}
+}
+
+// ClientOptWriter returns a function that sets the writer setting on client options set
+func ClientOptWriter(out io.Writer) ClientOption {
+	return func(client *Client) {
+		client.out = out
+	}
+}
+
+// ClientOptAuthorizer returns a function that sets the authorizer setting on a client options set. This
+// can be used to override the default authorization mechanism.
+//
+// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer.
+func ClientOptAuthorizer(authorizer auth.Client) ClientOption {
+	return func(client *Client) {
+		client.authorizer = &authorizer
+	}
+}
+
+// ClientOptRegistryAuthorizer returns a function that sets the registry authorizer setting on a client options set. This
+// can be used to override the default authorization mechanism.
+// +// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer. +func ClientOptRegistryAuthorizer(registryAuthorizer RemoteClient) ClientOption { + return func(client *Client) { + client.registryAuthorizer = registryAuthorizer + } +} + +// ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client options set +func ClientOptCredentialsFile(credentialsFile string) ClientOption { + return func(client *Client) { + client.credentialsFile = credentialsFile + } +} + +// ClientOptHTTPClient returns a function that sets the httpClient setting on a client options set +func ClientOptHTTPClient(httpClient *http.Client) ClientOption { + return func(client *Client) { + client.httpClient = httpClient + } +} + +func ClientOptPlainHTTP() ClientOption { + return func(c *Client) { + c.plainHTTP = true + } +} + +func ClientOptResolver(_ remotes.Resolver) ClientOption { + return func(c *Client) { + c.err = errDeprecatedRemote + } +} + +type ( + // LoginOption allows specifying various settings on login + LoginOption func(*loginOperation) + + loginOperation struct { + host string + client *Client + } +) + +// Login logs into a registry +func (c *Client) Login(host string, options ...LoginOption) error { + for _, option := range options { + option(&loginOperation{host, c}) + } + + reg, err := remote.NewRegistry(host) + if err != nil { + return err + } + reg.PlainHTTP = c.plainHTTP + reg.Client = c.authorizer + + ctx := context.Background() + cred, err := c.authorizer.Credential(ctx, host) + if err != nil { + return fmt.Errorf("fetching credentials for %q: %w", host, err) + } + + if err := reg.Ping(ctx); err != nil { + return fmt.Errorf("authenticating to %q: %w", host, err) + } + + key := credentials.ServerAddressFromRegistry(host) + if err := c.credentialsStore.Put(ctx, key, cred); err != nil { + return err + } + + fmt.Fprintln(c.out, "Login Succeeded") + return nil +} + +// LoginOptBasicAuth returns a function that sets the username/password settings on login +func LoginOptBasicAuth(username string, password string) LoginOption { + return func(o *loginOperation) { + o.client.username = username + o.client.password = password + o.client.authorizer.Credential = auth.StaticCredential(o.host, auth.Credential{Username: username, Password: password}) + } +} + +// LoginOptPlainText returns a function that allows plaintext (HTTP) login +func LoginOptPlainText(isPlainText bool) LoginOption { + return func(o *loginOperation) { + o.client.plainHTTP = isPlainText + } +} + +func ensureTLSConfig(client *auth.Client) (*tls.Config, error) { + var transport *http.Transport + + switch t := client.Client.Transport.(type) { + case *http.Transport: + transport = t + case *retry.Transport: + switch t := t.Base.(type) { + case *http.Transport: + transport = t + case *fallbackTransport: + switch t := t.Base.(type) { + case *http.Transport: + transport = t + } + } + } + + if transport == nil { + // we don't know how to access the http.Transport, most likely the + // auth.Client.Client was provided by API user + return nil, fmt.Errorf("unable to access TLS client configuration, the provided HTTP Transport is not supported, given: %T", client.Client.Transport) + } + + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + + return transport.TLSClientConfig, nil +} + +// LoginOptInsecure returns a function that sets the insecure setting on login +func LoginOptInsecure(insecure bool) LoginOption { + return func(o 
*loginOperation) { + tlsConfig, err := ensureTLSConfig(o.client.authorizer) + + if err != nil { + panic(err) + } + + tlsConfig.InsecureSkipVerify = insecure + } +} + +// LoginOptTLSClientConfig returns a function that sets the TLS settings on login. +func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption { + return func(o *loginOperation) { + if (certFile == "" || keyFile == "") && caFile == "" { + return + } + tlsConfig, err := ensureTLSConfig(o.client.authorizer) + if err != nil { + panic(err) + } + + if certFile != "" && keyFile != "" { + authCert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + panic(err) + } + tlsConfig.Certificates = []tls.Certificate{authCert} + } + + if caFile != "" { + certPool := x509.NewCertPool() + ca, err := os.ReadFile(caFile) + if err != nil { + panic(err) + } + if !certPool.AppendCertsFromPEM(ca) { + panic(fmt.Errorf("unable to parse CA file: %q", caFile)) + } + tlsConfig.RootCAs = certPool + } + } +} + +type ( + // LogoutOption allows specifying various settings on logout + LogoutOption func(*logoutOperation) + + logoutOperation struct{} +) + +// Logout logs out of a registry +func (c *Client) Logout(host string, opts ...LogoutOption) error { + operation := &logoutOperation{} + for _, opt := range opts { + opt(operation) + } + + if err := credentials.Logout(context.Background(), c.credentialsStore, host); err != nil { + return err + } + fmt.Fprintf(c.out, "Removing login credentials for %s\n", host) + return nil +} + +type ( + // PullOption allows specifying various settings on pull + PullOption func(*pullOperation) + + // PullResult is the result returned upon successful pull. + PullResult struct { + Manifest *DescriptorPullSummary `json:"manifest"` + Config *DescriptorPullSummary `json:"config"` + Chart *DescriptorPullSummaryWithMeta `json:"chart"` + Prov *DescriptorPullSummary `json:"prov"` + Ref string `json:"ref"` + } + + DescriptorPullSummary struct { + Data []byte `json:"-"` + Digest string `json:"digest"` + Size int64 `json:"size"` + } + + DescriptorPullSummaryWithMeta struct { + DescriptorPullSummary + Meta *chart.Metadata `json:"meta"` + } + + pullOperation struct { + withChart bool + withProv bool + ignoreMissingProv bool + } +) + +// Pull downloads a chart from a registry +func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { + parsedRef, err := newReference(ref) + if err != nil { + return nil, err + } + + operation := &pullOperation{ + withChart: true, // By default, always download the chart layer + } + for _, option := range options { + option(operation) + } + if !operation.withChart && !operation.withProv { + return nil, errors.New( + "must specify at least one layer to pull (chart/prov)") + } + memoryStore := memory.New() + allowedMediaTypes := []string{ + ocispec.MediaTypeImageManifest, + ConfigMediaType, + } + minNumDescriptors := 1 // 1 for the config + if operation.withChart { + minNumDescriptors++ + allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType) + } + if operation.withProv { + if !operation.ignoreMissingProv { + minNumDescriptors++ + } + allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType) + } + + var descriptors, layers []ocispec.Descriptor + + repository, err := remote.NewRepository(parsedRef.String()) + if err != nil { + return nil, err + } + repository.PlainHTTP = c.plainHTTP + repository.Client = c.authorizer + + ctx := context.Background() + + sort.Strings(allowedMediaTypes) + + var mu sync.Mutex + 
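+	// Copy the artifact from the remote repository into the in-memory store.
+	// The PreCopy hook below rejects any descriptor whose media type is not in
+	// the sorted allow-list (checked via binary search) and records each
+	// accepted layer under the mutex.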
manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{ + CopyGraphOptions: oras.CopyGraphOptions{ + PreCopy: func(_ context.Context, desc ocispec.Descriptor) error { + mediaType := desc.MediaType + if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType { + return errors.Errorf("media type %q is not allowed, found in descriptor with digest: %q", mediaType, desc.Digest) + } + + mu.Lock() + layers = append(layers, desc) + mu.Unlock() + return nil + }, + }, + }) + if err != nil { + return nil, err + } + + descriptors = append(descriptors, manifest) + descriptors = append(descriptors, layers...) + + numDescriptors := len(descriptors) + if numDescriptors < minNumDescriptors { + return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d", + minNumDescriptors, numDescriptors) + } + var configDescriptor *ocispec.Descriptor + var chartDescriptor *ocispec.Descriptor + var provDescriptor *ocispec.Descriptor + for _, descriptor := range descriptors { + d := descriptor + switch d.MediaType { + case ConfigMediaType: + configDescriptor = &d + case ChartLayerMediaType: + chartDescriptor = &d + case ProvLayerMediaType: + provDescriptor = &d + case LegacyChartLayerMediaType: + chartDescriptor = &d + fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType) + } + } + if configDescriptor == nil { + return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType) + } + if operation.withChart && chartDescriptor == nil { + return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", + ChartLayerMediaType) + } + var provMissing bool + if operation.withProv && provDescriptor == nil { + if operation.ignoreMissingProv { + provMissing = true + } else { + return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", + ProvLayerMediaType) + } + } + result := &PullResult{ + Manifest: &DescriptorPullSummary{ + Digest: manifest.Digest.String(), + Size: manifest.Size, + }, + Config: &DescriptorPullSummary{ + Digest: configDescriptor.Digest.String(), + Size: configDescriptor.Size, + }, + Chart: &DescriptorPullSummaryWithMeta{}, + Prov: &DescriptorPullSummary{}, + Ref: parsedRef.String(), + } + + result.Manifest.Data, err = content.FetchAll(ctx, memoryStore, manifest) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", manifest.Digest, err) + } + + result.Config.Data, err = content.FetchAll(ctx, memoryStore, *configDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", configDescriptor.Digest, err) + } + + if err := json.Unmarshal(result.Config.Data, &result.Chart.Meta); err != nil { + return nil, err + } + + if operation.withChart { + result.Chart.Data, err = content.FetchAll(ctx, memoryStore, *chartDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", chartDescriptor.Digest, err) + } + result.Chart.Digest = chartDescriptor.Digest.String() + result.Chart.Size = chartDescriptor.Size + } + + if operation.withProv && !provMissing { + result.Prov.Data, err = content.FetchAll(ctx, memoryStore, *provDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", provDescriptor.Digest, err) + } + result.Prov.Digest = provDescriptor.Digest.String() + result.Prov.Size = provDescriptor.Size + } + + fmt.Fprintf(c.out, "Pulled: 
%s\n", result.Ref) + fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) + + if strings.Contains(result.Ref, "_") { + fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) + fmt.Fprint(c.out, registryUnderscoreMessage+"\n") + } + + return result, nil +} + +// PullOptWithChart returns a function that sets the withChart setting on pull +func PullOptWithChart(withChart bool) PullOption { + return func(operation *pullOperation) { + operation.withChart = withChart + } +} + +// PullOptWithProv returns a function that sets the withProv setting on pull +func PullOptWithProv(withProv bool) PullOption { + return func(operation *pullOperation) { + operation.withProv = withProv + } +} + +// PullOptIgnoreMissingProv returns a function that sets the ignoreMissingProv setting on pull +func PullOptIgnoreMissingProv(ignoreMissingProv bool) PullOption { + return func(operation *pullOperation) { + operation.ignoreMissingProv = ignoreMissingProv + } +} + +type ( + // PushOption allows specifying various settings on push + PushOption func(*pushOperation) + + // PushResult is the result returned upon successful push. + PushResult struct { + Manifest *descriptorPushSummary `json:"manifest"` + Config *descriptorPushSummary `json:"config"` + Chart *descriptorPushSummaryWithMeta `json:"chart"` + Prov *descriptorPushSummary `json:"prov"` + Ref string `json:"ref"` + } + + descriptorPushSummary struct { + Digest string `json:"digest"` + Size int64 `json:"size"` + } + + descriptorPushSummaryWithMeta struct { + descriptorPushSummary + Meta *chart.Metadata `json:"meta"` + } + + pushOperation struct { + provData []byte + strictMode bool + creationTime string + } +) + +// Push uploads a chart to a registry. +func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) { + parsedRef, err := newReference(ref) + if err != nil { + return nil, err + } + + operation := &pushOperation{ + strictMode: true, // By default, enable strict mode + } + for _, option := range options { + option(operation) + } + meta, err := extractChartMeta(data) + if err != nil { + return nil, err + } + if operation.strictMode { + if !strings.HasSuffix(ref, fmt.Sprintf("/%s:%s", meta.Name, meta.Version)) { + return nil, errors.New( + "strict mode enabled, ref basename and tag must match the chart name and version") + } + } + + ctx := context.Background() + + memoryStore := memory.New() + chartDescriptor, err := oras.PushBytes(ctx, memoryStore, ChartLayerMediaType, data) + if err != nil { + return nil, err + } + + configData, err := json.Marshal(meta) + if err != nil { + return nil, err + } + + configDescriptor, err := oras.PushBytes(ctx, memoryStore, ConfigMediaType, configData) + if err != nil { + return nil, err + } + + layers := []ocispec.Descriptor{chartDescriptor} + var provDescriptor ocispec.Descriptor + if operation.provData != nil { + provDescriptor, err = oras.PushBytes(ctx, memoryStore, ProvLayerMediaType, operation.provData) + if err != nil { + return nil, err + } + + layers = append(layers, provDescriptor) + } + + // sort layers for determinism, similar to how ORAS v1 does it + sort.Slice(layers, func(i, j int) bool { + return layers[i].Digest < layers[j].Digest + }) + + ociAnnotations := generateOCIAnnotations(meta, operation.creationTime) + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: configDescriptor, + Layers: layers, + Annotations: ociAnnotations, + } + + manifestData, err := json.Marshal(manifest) + if err != nil { + return nil, err + } + + 
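+	// Tag the manifest bytes into the local memory store; the ExtendedCopy
+	// below then pushes the tagged manifest and everything it references
+	// (config, chart and prov layers) from the memory store to the remote
+	// repository.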
+	manifestDescriptor, err := oras.TagBytes(ctx, memoryStore, ocispec.MediaTypeImageManifest, manifestData, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	repository, err := remote.NewRepository(parsedRef.String())
+	if err != nil {
+		return nil, err
+	}
+	repository.PlainHTTP = c.plainHTTP
+	repository.Client = c.authorizer
+
+	manifestDescriptor, err = oras.ExtendedCopy(ctx, memoryStore, parsedRef.String(), repository, parsedRef.String(), oras.DefaultExtendedCopyOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	chartSummary := &descriptorPushSummaryWithMeta{
+		Meta: meta,
+	}
+	chartSummary.Digest = chartDescriptor.Digest.String()
+	chartSummary.Size = chartDescriptor.Size
+	result := &PushResult{
+		Manifest: &descriptorPushSummary{
+			Digest: manifestDescriptor.Digest.String(),
+			Size:   manifestDescriptor.Size,
+		},
+		Config: &descriptorPushSummary{
+			Digest: configDescriptor.Digest.String(),
+			Size:   configDescriptor.Size,
+		},
+		Chart: chartSummary,
+		Prov:  &descriptorPushSummary{}, // prevent nil references
+		Ref:   parsedRef.String(),
+	}
+	if operation.provData != nil {
+		result.Prov = &descriptorPushSummary{
+			Digest: provDescriptor.Digest.String(),
+			Size:   provDescriptor.Size,
+		}
+	}
+	fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref)
+	fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+	if strings.Contains(parsedRef.orasReference.Reference, "_") {
+		fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+		fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+	}
+
+	return result, err
+}
+
+// PushOptProvData returns a function that sets the prov bytes setting on push
+func PushOptProvData(provData []byte) PushOption {
+	return func(operation *pushOperation) {
+		operation.provData = provData
+	}
+}
+
+// PushOptStrictMode returns a function that sets the strictMode setting on push
+func PushOptStrictMode(strictMode bool) PushOption {
+	return func(operation *pushOperation) {
+		operation.strictMode = strictMode
+	}
+}
+
+// PushOptCreationTime returns a function that sets the creation time on push
+func PushOptCreationTime(creationTime string) PushOption {
+	return func(operation *pushOperation) {
+		operation.creationTime = creationTime
+	}
+}
+
+// Tags provides a sorted list of all semver-compliant tags for a given repository
+func (c *Client) Tags(ref string) ([]string, error) {
+	parsedReference, err := registry.ParseReference(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx := context.Background()
+	repository, err := remote.NewRepository(parsedReference.String())
+	if err != nil {
+		return nil, err
+	}
+	repository.PlainHTTP = c.plainHTTP
+	repository.Client = c.authorizer
+
+	var tagVersions []*semver.Version
+	err = repository.Tags(ctx, "", func(tags []string) error {
+		for _, tag := range tags {
+			// Change underscore (_) back to plus (+) for Helm
+			// See https://github.com/helm/helm/issues/10166
+			tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+"))
+			if err == nil {
+				tagVersions = append(tagVersions, tagVersion)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Sort the collection
+	sort.Sort(sort.Reverse(semver.Collection(tagVersions)))
+
+	tags := make([]string, len(tagVersions))
+
+	for iTv, tv := range tagVersions {
+		tags[iTv] = tv.String()
+	}
+
+	return tags, nil
+}
+
+// Resolve a reference to a descriptor.
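+//
+// A hedged usage sketch (registry host and chart name are hypothetical):
+//
+//	desc, err := client.Resolve("localhost:5000/myrepo/mychart:0.1.0")
+//	if err == nil {
+//		fmt.Println(desc.Digest, desc.Size)
+//	}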
+func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) { + remoteRepository, err := remote.NewRepository(ref) + if err != nil { + return desc, err + } + remoteRepository.PlainHTTP = c.plainHTTP + + parsedReference, err := newReference(ref) + if err != nil { + return desc, err + } + + ctx := context.Background() + parsedString := parsedReference.String() + return remoteRepository.Resolve(ctx, parsedString) +} + +// ValidateReference for path and version +func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, error) { + var tag string + + registryReference, err := newReference(u.Host + u.Path) + if err != nil { + return nil, err + } + + if version == "" { + // Use OCI URI tag as default + version = registryReference.Tag + } else { + if registryReference.Tag != "" && registryReference.Tag != version { + return nil, errors.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag) + } + } + + if registryReference.Digest != "" { + if version == "" { + // Install by digest only + return u, nil + } + u.Path = fmt.Sprintf("%s@%s", registryReference.Repository, registryReference.Digest) + + // Validate the tag if it was specified + path := registryReference.Registry + "/" + registryReference.Repository + ":" + version + desc, err := c.Resolve(path) + if err != nil { + // The resource does not have to be tagged when digest is specified + return u, nil + } + if desc.Digest.String() != registryReference.Digest { + return nil, errors.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest) + } + return u, nil + } + + // Evaluate whether an explicit version has been provided. Otherwise, determine version to use + _, errSemVer := semver.NewVersion(version) + if errSemVer == nil { + tag = version + } else { + // Retrieve list of repository tags + tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme))) + if err != nil { + return nil, err + } + if len(tags) == 0 { + return nil, errors.Errorf("Unable to locate any tags in provided repository: %s", ref) + } + + // Determine if version provided + // If empty, try to get the highest available tag + // If exact version, try to find it + // If semver constraint string, try to find a match + tag, err = GetTagMatchingVersionOrConstraint(tags, version) + if err != nil { + return nil, err + } + } + + u.Path = fmt.Sprintf("%s:%s", registryReference.Repository, tag) + + return u, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/registry/constants.go b/vendor/helm.sh/helm/v3/pkg/registry/constants.go new file mode 100644 index 000000000000..570b6f0d3f08 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/registry/constants.go @@ -0,0 +1,37 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry // import "helm.sh/helm/v3/pkg/registry" + +const ( + // OCIScheme is the URL scheme for OCI-based requests + OCIScheme = "oci" + + // CredentialsFileBasename is the filename for auth credentials file + CredentialsFileBasename = "registry/config.json" + + // ConfigMediaType is the reserved media type for the Helm chart manifest config + ConfigMediaType = "application/vnd.cncf.helm.config.v1+json" + + // ChartLayerMediaType is the reserved media type for Helm chart package content + ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + + // ProvLayerMediaType is the reserved media type for Helm chart provenance files + ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov" + + // LegacyChartLayerMediaType is the legacy reserved media type for Helm chart package content. + LegacyChartLayerMediaType = "application/tar+gzip" +) diff --git a/vendor/helm.sh/helm/v3/pkg/registry/fallback.go b/vendor/helm.sh/helm/v3/pkg/registry/fallback.go new file mode 100644 index 000000000000..000237cbc313 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/registry/fallback.go @@ -0,0 +1,77 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "crypto/tls" + "net/http" + "sync/atomic" +) + +// NOTE(terryhowe): This fallback feature is only provided in v3 for backward +// compatibility. ORAS v1 had this feature and this code was added when helm +// updated to ORAS v2. This will not be supported in helm v4. + +type fallbackTransport struct { + Base http.RoundTripper + forceHTTP atomic.Bool +} + +func newTransport() *fallbackTransport { + type cloner[T any] interface { + Clone() T + } + // try to copy (clone) the http.DefaultTransport so any mutations we + // perform on it (e.g. TLS config) are not reflected globally + // follow https://github.com/golang/go/issues/39299 for a more elegant + // solution in the future + baseTransport := http.DefaultTransport + if t, ok := baseTransport.(cloner[*http.Transport]); ok { + baseTransport = t.Clone() + } else if t, ok := baseTransport.(cloner[http.RoundTripper]); ok { + // this branch will not be used with go 1.20, it was added + // optimistically to try to clone if the http.DefaultTransport + // implementation changes, still the Clone method in that case + // might not return http.RoundTripper... + baseTransport = t.Clone() + } + + return &fallbackTransport{ + Base: baseTransport, + } +} + +// RoundTrip wraps base round trip with conditional insecure retry. +func (t *fallbackTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if ok := t.forceHTTP.Load(); ok { + req.URL.Scheme = "http" + return t.Base.RoundTrip(req) + } + resp, err := t.Base.RoundTrip(req) + // We are falling back to http here for backward compatibility with Helm v3. + // ORAS v1 provided fallback automatically, but ORAS v2 does not. 
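+	// A tls.RecordHeaderError whose record header spells "HTTP/" means the
+	// server answered the HTTPS request with plaintext HTTP; in that case,
+	// retry over http and remember the downgrade for subsequent requests.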
+	if err != nil && req.URL.Scheme == "https" {
+		if tlsErr, ok := err.(tls.RecordHeaderError); ok {
+			if string(tlsErr.RecordHeader[:]) == "HTTP/" {
+				t.forceHTTP.Store(true)
+				req.URL.Scheme = "http"
+				return t.Base.RoundTrip(req)
+			}
+		}
+	}
+	return resp, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/registry/reference.go b/vendor/helm.sh/helm/v3/pkg/registry/reference.go
new file mode 100644
index 000000000000..b5677761da77
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/registry/reference.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"strings"
+
+	"oras.land/oras-go/v2/registry"
+)
+
+type reference struct {
+	orasReference registry.Reference
+	Registry      string
+	Repository    string
+	Tag           string
+	Digest        string
+}
+
+// newReference will parse and validate the reference, and clean tags when
+// applicable. Tags are only cleaned when plus (+) signs are present, and are
+// converted to underscores (_) before pushing.
+// See https://github.com/helm/helm/issues/10166
+func newReference(raw string) (result reference, err error) {
+	// Remove oci:// prefix if it is there
+	raw = strings.TrimPrefix(raw, OCIScheme+"://")
+
+	// The sole possible reference modification is replacing plus (+) signs
+	// present in tags with underscores (_). To do this properly, we first
+	// need to identify a tag, and then pass it on to the reference parser
+	// NOTE: Passing immediately to the reference parser will fail since (+)
+	// signs are an invalid tag character, and simply replacing all plus (+)
+	// occurrences could invalidate other portions of the URI
+	lastIndex := strings.LastIndex(raw, "@")
+	if lastIndex >= 0 {
+		result.Digest = raw[(lastIndex + 1):]
+		raw = raw[:lastIndex]
+	}
+	parts := strings.Split(raw, ":")
+	if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") {
+		tag := parts[len(parts)-1]
+
+		if tag != "" {
+			// Replace any plus (+) signs with known underscore (_) conversion
+			newTag := strings.ReplaceAll(tag, "+", "_")
+			raw = strings.ReplaceAll(raw, tag, newTag)
+		}
+	}
+
+	result.orasReference, err = registry.ParseReference(raw)
+	if err != nil {
+		return result, err
+	}
+	result.Registry = result.orasReference.Registry
+	result.Repository = result.orasReference.Repository
+	result.Tag = result.orasReference.Reference
+	return result, nil
+}
+
+func (r *reference) String() string {
+	if r.Tag == "" {
+		return r.orasReference.String() + "@" + r.Digest
+	}
+	return r.orasReference.String()
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/registry/util.go b/vendor/helm.sh/helm/v3/pkg/registry/util.go
new file mode 100644
index 000000000000..84ee698072b3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/registry/util.go
@@ -0,0 +1,208 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry // import "helm.sh/helm/v3/pkg/registry" + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "time" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + helmtime "helm.sh/helm/v3/pkg/time" + + "github.com/Masterminds/semver/v3" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var immutableOciAnnotations = []string{ + ocispec.AnnotationVersion, + ocispec.AnnotationTitle, +} + +// IsOCI determines whether a URL is to be treated as an OCI URL +func IsOCI(url string) bool { + return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme)) +} + +// ContainsTag determines whether a tag is found in a provided list of tags +func ContainsTag(tags []string, tag string) bool { + for _, t := range tags { + if tag == t { + return true + } + } + return false +} + +func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (string, error) { + var constraint *semver.Constraints + if versionString == "" { + // If string is empty, set wildcard constraint + constraint, _ = semver.NewConstraint("*") + } else { + // when customer inputs specific version, check whether there's an exact match first + for _, v := range tags { + if versionString == v { + return v, nil + } + } + + // Otherwise set constraint to the string given + var err error + constraint, err = semver.NewConstraint(versionString) + if err != nil { + return "", err + } + } + + // Otherwise try to find the first available version matching the string, + // in case it is a constraint + for _, v := range tags { + test, err := semver.NewVersion(v) + if err != nil { + continue + } + if constraint.Check(test) { + return v, nil + } + } + + return "", errors.Errorf("Could not locate a version matching provided version string %s", versionString) +} + +// extractChartMeta is used to extract a chart metadata from a byte array +func extractChartMeta(chartData []byte) (*chart.Metadata, error) { + ch, err := loader.LoadArchive(bytes.NewReader(chartData)) + if err != nil { + return nil, err + } + return ch.Metadata, nil +} + +// NewRegistryClientWithTLS is a helper function to create a new registry client with TLS enabled. 
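+//
+// A hedged usage sketch (all file paths are placeholders):
+//
+//	rc, err := NewRegistryClientWithTLS(os.Stdout,
+//		"client.crt", "client.key", "ca.crt",
+//		false, "/tmp/helm/registry/config.json", true)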
+func NewRegistryClientWithTLS(out io.Writer, certFile, keyFile, caFile string, insecureSkipTLSverify bool, registryConfig string, debug bool) (*Client, error) {
+	tlsConf, err := tlsutil.NewClientTLS(certFile, keyFile, caFile, insecureSkipTLSverify)
+	if err != nil {
+		return nil, fmt.Errorf("can't create TLS config for client: %s", err)
+	}
+	// Create a new registry client
+	registryClient, err := NewClient(
+		ClientOptDebug(debug),
+		ClientOptEnableCache(true),
+		ClientOptWriter(out),
+		ClientOptCredentialsFile(registryConfig),
+		ClientOptHTTPClient(&http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConf,
+				Proxy:           http.ProxyFromEnvironment,
+			},
+		}),
+	)
+	if err != nil {
+		return nil, err
+	}
+	return registryClient, nil
+}
+
+// generateOCIAnnotations will generate OCI annotations to include within the OCI manifest
+func generateOCIAnnotations(meta *chart.Metadata, creationTime string) map[string]string {
+
+	// Get annotations from Chart attributes
+	ociAnnotations := generateChartOCIAnnotations(meta, creationTime)
+
+	// Copy Chart annotations
+annotations:
+	for chartAnnotationKey, chartAnnotationValue := range meta.Annotations {
+
+		// Avoid overriding key properties
+		for _, immutableOciKey := range immutableOciAnnotations {
+			if immutableOciKey == chartAnnotationKey {
+				continue annotations
+			}
+		}
+
+		// Add chart annotation
+		ociAnnotations[chartAnnotationKey] = chartAnnotationValue
+	}
+
+	return ociAnnotations
+}
+
+// generateChartOCIAnnotations will generate OCI annotations from the provided chart
+func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[string]string {
+	chartOCIAnnotations := map[string]string{}
+
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationDescription, meta.Description)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationTitle, meta.Name)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationVersion, meta.Version)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationURL, meta.Home)
+
+	if len(creationTime) == 0 {
+		creationTime = helmtime.Now().UTC().Format(time.RFC3339)
+	}
+
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationCreated, creationTime)
+
+	if len(meta.Sources) > 0 {
+		chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationSource, meta.Sources[0])
+	}
+
+	if len(meta.Maintainers) > 0 {
+		var maintainerSb strings.Builder
+
+		for maintainerIdx, maintainer := range meta.Maintainers {
+
+			if len(maintainer.Name) > 0 {
+				maintainerSb.WriteString(maintainer.Name)
+			}
+
+			if len(maintainer.Email) > 0 {
+				maintainerSb.WriteString(" (")
+				maintainerSb.WriteString(maintainer.Email)
+				maintainerSb.WriteString(")")
+			}
+
+			if maintainerIdx < len(meta.Maintainers)-1 {
+				maintainerSb.WriteString(", ")
+			}
+
+		}
+
+		chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationAuthors, maintainerSb.String())
+
+	}
+
+	return chartOCIAnnotations
+}
+
+// addToMap takes an existing map and adds an item if the value is not empty
+func addToMap(inputMap map[string]string, newKey string, newValue string) map[string]string {
+
+	// Add the item to the map only if its value is non-empty
+	if len(strings.TrimSpace(newValue)) > 0 {
+		inputMap[newKey] = newValue
+	}
+
+	return inputMap
+
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/hook.go b/vendor/helm.sh/helm/v3/pkg/release/hook.go
new file mode 100644
index 000000000000..425074ac1d45
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/hook.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+	"helm.sh/helm/v3/pkg/time"
+)
+
+// HookEvent specifies the hook event
+type HookEvent string
+
+// Hook event types
+const (
+	HookPreInstall   HookEvent = "pre-install"
+	HookPostInstall  HookEvent = "post-install"
+	HookPreDelete    HookEvent = "pre-delete"
+	HookPostDelete   HookEvent = "post-delete"
+	HookPreUpgrade   HookEvent = "pre-upgrade"
+	HookPostUpgrade  HookEvent = "post-upgrade"
+	HookPreRollback  HookEvent = "pre-rollback"
+	HookPostRollback HookEvent = "post-rollback"
+	HookTest         HookEvent = "test"
+)
+
+func (x HookEvent) String() string { return string(x) }
+
+// HookDeletePolicy specifies the hook delete policy
+type HookDeletePolicy string
+
+// Hook delete policy types
+const (
+	HookSucceeded          HookDeletePolicy = "hook-succeeded"
+	HookFailed             HookDeletePolicy = "hook-failed"
+	HookBeforeHookCreation HookDeletePolicy = "before-hook-creation"
+)
+
+func (x HookDeletePolicy) String() string { return string(x) }
+
+// HookOutputLogPolicy specifies the hook output log policy
+type HookOutputLogPolicy string
+
+// Hook output log policy types
+const (
+	HookOutputOnSucceeded HookOutputLogPolicy = "hook-succeeded"
+	HookOutputOnFailed    HookOutputLogPolicy = "hook-failed"
+)
+
+func (x HookOutputLogPolicy) String() string { return string(x) }
+
+// HookAnnotation is the annotation name for a hook
+const HookAnnotation = "helm.sh/hook"
+
+// HookWeightAnnotation is the annotation name for a hook weight
+const HookWeightAnnotation = "helm.sh/hook-weight"
+
+// HookDeleteAnnotation is the annotation name for the delete policy for a hook
+const HookDeleteAnnotation = "helm.sh/hook-delete-policy"
+
+// HookOutputLogAnnotation is the annotation name for the output log policy for a hook
+const HookOutputLogAnnotation = "helm.sh/hook-output-log-policy"
+
+// Hook defines a hook object.
+type Hook struct {
+	Name string `json:"name,omitempty"`
+	// Kind is the Kubernetes kind.
+	Kind string `json:"kind,omitempty"`
+	// Path is the chart-relative path to the template.
+	Path string `json:"path,omitempty"`
+	// Manifest is the manifest contents.
+	Manifest string `json:"manifest,omitempty"`
+	// Events are the events that this hook fires on.
+	Events []HookEvent `json:"events,omitempty"`
+	// LastRun indicates the date/time this was last run.
+	LastRun HookExecution `json:"last_run,omitempty"`
+	// Weight indicates the sort order for execution among hooks of a similar type
+	Weight int `json:"weight,omitempty"`
+	// DeletePolicies are the policies that indicate when to delete the hook
+	DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"`
+	// OutputLogPolicies defines whether we should copy hook logs back to the main process
+	OutputLogPolicies []HookOutputLogPolicy `json:"output_log_policies,omitempty"`
+}
+
+// A HookExecution records the result for the last execution of a hook for a given release.
+type HookExecution struct {
+	// StartedAt indicates the date/time this hook was started
+	StartedAt time.Time `json:"started_at,omitempty"`
+	// CompletedAt indicates the date/time this hook was completed.
+	CompletedAt time.Time `json:"completed_at,omitempty"`
+	// Phase indicates whether the hook completed successfully
+	Phase HookPhase `json:"phase"`
+}
+
+// A HookPhase indicates the state of a hook execution
+type HookPhase string
+
+const (
+	// HookPhaseUnknown indicates that a hook is in an unknown state
+	HookPhaseUnknown HookPhase = "Unknown"
+	// HookPhaseRunning indicates that a hook is currently executing
+	HookPhaseRunning HookPhase = "Running"
+	// HookPhaseSucceeded indicates that hook execution succeeded
+	HookPhaseSucceeded HookPhase = "Succeeded"
+	// HookPhaseFailed indicates that hook execution failed
+	HookPhaseFailed HookPhase = "Failed"
+)
+
+// String converts a hook phase to a printable string
+func (x HookPhase) String() string { return string(x) }
diff --git a/vendor/helm.sh/helm/v3/pkg/release/info.go b/vendor/helm.sh/helm/v3/pkg/release/info.go
new file mode 100644
index 000000000000..b030a8a545a9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/info.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"helm.sh/helm/v3/pkg/time"
+)
+
+// Info describes release information.
+type Info struct {
+	// FirstDeployed is when the release was first deployed.
+	FirstDeployed time.Time `json:"first_deployed,omitempty"`
+	// LastDeployed is when the release was last deployed.
+	LastDeployed time.Time `json:"last_deployed,omitempty"`
+	// Deleted tracks when this object was deleted.
+	Deleted time.Time `json:"deleted"`
+	// Description is a human-friendly "log entry" about this release.
+	Description string `json:"description,omitempty"`
+	// Status is the current state of the release
+	Status Status `json:"status,omitempty"`
+	// Notes contains the rendered templates/NOTES.txt if available
+	Notes string `json:"notes,omitempty"`
+	// Resources contains the deployed resources information
+	Resources map[string][]runtime.Object `json:"resources,omitempty"`
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/mock.go b/vendor/helm.sh/helm/v3/pkg/release/mock.go
new file mode 100644
index 000000000000..eb0b5157df00
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/mock.go
@@ -0,0 +1,134 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package release + +import ( + "fmt" + "math/rand" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/time" +) + +// MockHookTemplate is the hook template used for all mock release objects. +var MockHookTemplate = `apiVersion: v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-install +` + +// MockManifest is the manifest used for all mock release objects. +var MockManifest = `apiVersion: v1 +kind: Secret +metadata: + name: fixture +` + +// MockReleaseOptions allows for user-configurable options on mock release objects. +type MockReleaseOptions struct { + Name string + Version int + Chart *chart.Chart + Status Status + Namespace string +} + +// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing. +func Mock(opts *MockReleaseOptions) *Release { + date := time.Unix(242085845, 0).UTC() + + name := opts.Name + if name == "" { + name = "testrelease-" + fmt.Sprint(rand.Intn(100)) + } + + version := 1 + if opts.Version != 0 { + version = opts.Version + } + + namespace := opts.Namespace + if namespace == "" { + namespace = "default" + } + + ch := opts.Chart + if opts.Chart == nil { + ch = &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "foo", + Version: "0.1.0-beta.1", + AppVersion: "1.0", + Annotations: map[string]string{ + "category": "web-apps", + "supported": "true", + }, + Dependencies: []*chart.Dependency{ + { + Name: "cool-plugin", + Version: "1.0.0", + Repository: "https://coolplugin.io/charts", + Condition: "coolPlugin.enabled", + Enabled: true, + }, + { + Name: "crds", + Version: "2.7.1", + Condition: "crds.enabled", + }, + }, + }, + Templates: []*chart.File{ + {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, + }, + } + } + + scode := StatusDeployed + if len(opts.Status) > 0 { + scode = opts.Status + } + + info := &Info{ + FirstDeployed: date, + LastDeployed: date, + Status: scode, + Description: "Release mock", + Notes: "Some mock release notes!", + } + + return &Release{ + Name: name, + Info: info, + Chart: ch, + Config: map[string]interface{}{"name": "value"}, + Version: version, + Namespace: namespace, + Hooks: []*Hook{ + { + Name: "pre-install-hook", + Kind: "Job", + Path: "pre-install-hook.yaml", + Manifest: MockHookTemplate, + LastRun: HookExecution{}, + Events: []HookEvent{HookPreInstall}, + }, + }, + Manifest: MockManifest, + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/release/release.go b/vendor/helm.sh/helm/v3/pkg/release/release.go new file mode 100644 index 000000000000..b90612873626 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/release/release.go @@ -0,0 +1,49 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release + +import "helm.sh/helm/v3/pkg/chart" + +// Release describes a deployment of a chart, together with the chart +// and the variables used to deploy that chart. 
+type Release struct {
+	// Name is the name of the release
+	Name string `json:"name,omitempty"`
+	// Info provides information about a release
+	Info *Info `json:"info,omitempty"`
+	// Chart is the chart that was released.
+	Chart *chart.Chart `json:"chart,omitempty"`
+	// Config is the set of extra Values added to the chart.
+	// These values override the default values inside of the chart.
+	Config map[string]interface{} `json:"config,omitempty"`
+	// Manifest is the string representation of the rendered template.
+	Manifest string `json:"manifest,omitempty"`
+	// Hooks are all of the hooks declared for this release.
+	Hooks []*Hook `json:"hooks,omitempty"`
+	// Version is an int which represents the revision of the release.
+	Version int `json:"version,omitempty"`
+	// Namespace is the kubernetes namespace of the release.
+	Namespace string `json:"namespace,omitempty"`
+	// Labels of the release.
+	// JSON encoding is disabled because labels are stored in the storage driver's metadata field.
+	Labels map[string]string `json:"-"`
+}
+
+// SetStatus is a helper for setting the status on a release.
+func (r *Release) SetStatus(status Status, msg string) {
+	r.Info.Status = status
+	r.Info.Description = msg
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/responses.go b/vendor/helm.sh/helm/v3/pkg/release/responses.go
new file mode 100644
index 000000000000..7ee1fc2eeef8
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/responses.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+// UninstallReleaseResponse represents a successful response to an uninstall request.
+type UninstallReleaseResponse struct {
+	// Release is the release that was marked deleted.
+	Release *Release `json:"release,omitempty"`
+	// Info is an uninstall message
+	Info string `json:"info,omitempty"`
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/status.go b/vendor/helm.sh/helm/v3/pkg/release/status.go
new file mode 100644
index 000000000000..edd27a5f1400
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/status.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+// Status is the status of a release
+type Status string
+
+// Describe the status of a release
+// NOTE: Make sure to update cmd/helm/status.go when adding or modifying any of these statuses.
+const (
+	// StatusUnknown indicates that a release is in an uncertain state.
+ StatusUnknown Status = "unknown" + // StatusDeployed indicates that the release has been pushed to Kubernetes. + StatusDeployed Status = "deployed" + // StatusUninstalled indicates that a release has been uninstalled from Kubernetes. + StatusUninstalled Status = "uninstalled" + // StatusSuperseded indicates that this release object is outdated and a newer one exists. + StatusSuperseded Status = "superseded" + // StatusFailed indicates that the release was not successfully deployed. + StatusFailed Status = "failed" + // StatusUninstalling indicates that an uninstall operation is underway. + StatusUninstalling Status = "uninstalling" + // StatusPendingInstall indicates that an install operation is underway. + StatusPendingInstall Status = "pending-install" + // StatusPendingUpgrade indicates that an upgrade operation is underway. + StatusPendingUpgrade Status = "pending-upgrade" + // StatusPendingRollback indicates that a rollback operation is underway. + StatusPendingRollback Status = "pending-rollback" +) + +func (x Status) String() string { return string(x) } + +// IsPending determines if this status is a state or a transition. +func (x Status) IsPending() bool { + return x == StatusPendingInstall || x == StatusPendingUpgrade || x == StatusPendingRollback +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go new file mode 100644 index 000000000000..dbd0df8e2dc5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go @@ -0,0 +1,78 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil" + +import rspb "helm.sh/helm/v3/pkg/release" + +// FilterFunc returns true if the release object satisfies +// the predicate of the underlying filter func. +type FilterFunc func(*rspb.Release) bool + +// Check applies the FilterFunc to the release object. +func (fn FilterFunc) Check(rls *rspb.Release) bool { + if rls == nil { + return false + } + return fn(rls) +} + +// Filter applies the filter(s) to the list of provided releases +// returning the list that satisfies the filtering predicate. +func (fn FilterFunc) Filter(rels []*rspb.Release) (rets []*rspb.Release) { + for _, rel := range rels { + if fn.Check(rel) { + rets = append(rets, rel) + } + } + return +} + +// Any returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 || f1 || ... || fn'. +func Any(filters ...FilterFunc) FilterFunc { + return func(rls *rspb.Release) bool { + for _, filter := range filters { + if filter(rls) { + return true + } + } + return false + } +} + +// All returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 && f1 && ... && fn'. +func All(filters ...FilterFunc) FilterFunc { + return func(rls *rspb.Release) bool { + for _, filter := range filters { + if !filter(rls) { + return false + } + } + return true + } +} + +// StatusFilter filters a set of releases by status code. 
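+//
+// A hedged composition sketch ("rels" is a hypothetical release slice):
+//
+//	active := Any(
+//		StatusFilter(rspb.StatusDeployed),
+//		StatusFilter(rspb.StatusPendingUpgrade),
+//	).Filter(rels)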
+func StatusFilter(status rspb.Status) FilterFunc { + return FilterFunc(func(rls *rspb.Release) bool { + if rls == nil { + return true + } + return rls.Info.Status == status + }) +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go new file mode 100644 index 000000000000..bb8e84dda8f4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go @@ -0,0 +1,160 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil + +import ( + "sort" + + "helm.sh/helm/v3/pkg/release" +) + +// KindSortOrder is an ordering of Kinds. +type KindSortOrder []string + +// InstallOrder is the order in which manifests should be installed (by Kind). +// +// Those occurring earlier in the list get installed before those occurring later in the list. +var InstallOrder KindSortOrder = []string{ + "PriorityClass", + "Namespace", + "NetworkPolicy", + "ResourceQuota", + "LimitRange", + "PodSecurityPolicy", + "PodDisruptionBudget", + "ServiceAccount", + "Secret", + "SecretList", + "ConfigMap", + "StorageClass", + "PersistentVolume", + "PersistentVolumeClaim", + "CustomResourceDefinition", + "ClusterRole", + "ClusterRoleList", + "ClusterRoleBinding", + "ClusterRoleBindingList", + "Role", + "RoleList", + "RoleBinding", + "RoleBindingList", + "Service", + "DaemonSet", + "Pod", + "ReplicationController", + "ReplicaSet", + "Deployment", + "HorizontalPodAutoscaler", + "StatefulSet", + "Job", + "CronJob", + "IngressClass", + "Ingress", + "APIService", +} + +// UninstallOrder is the order in which manifests should be uninstalled (by Kind). +// +// Those occurring earlier in the list get uninstalled before those occurring later in the list. +var UninstallOrder KindSortOrder = []string{ + "APIService", + "Ingress", + "IngressClass", + "Service", + "CronJob", + "Job", + "StatefulSet", + "HorizontalPodAutoscaler", + "Deployment", + "ReplicaSet", + "ReplicationController", + "Pod", + "DaemonSet", + "RoleBindingList", + "RoleBinding", + "RoleList", + "Role", + "ClusterRoleBindingList", + "ClusterRoleBinding", + "ClusterRoleList", + "ClusterRole", + "CustomResourceDefinition", + "PersistentVolumeClaim", + "PersistentVolume", + "StorageClass", + "ConfigMap", + "SecretList", + "Secret", + "ServiceAccount", + "PodDisruptionBudget", + "PodSecurityPolicy", + "LimitRange", + "ResourceQuota", + "NetworkPolicy", + "Namespace", + "PriorityClass", +} + +// sort manifests by kind. +// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest { + sort.SliceStable(manifests, func(i, j int) bool { + return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering) + }) + + return manifests +} + +// sort hooks by kind, using an out-of-place sort to preserve the input parameters. 
+// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook { + h := hooks + sort.SliceStable(h, func(i, j int) bool { + return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering) + }) + + return h +} + +func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { + ordering := make(map[string]int, len(o)) + for v, k := range o { + ordering[k] = v + } + + first, aok := ordering[kindA] + second, bok := ordering[kindB] + + if !aok && !bok { + // if both are unknown then sort alphabetically by kind, keep original order if same kind + if kindA != kindB { + return kindA < kindB + } + return first < second + } + // unknown kind is last + if !aok { + return false + } + if !bok { + return true + } + // sort different kinds, keep original order if same priority + return first < second +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go new file mode 100644 index 000000000000..0b04a4599b26 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go @@ -0,0 +1,72 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// SimpleHead defines what the structure of the head of a manifest file +type SimpleHead struct { + Version string `json:"apiVersion"` + Kind string `json:"kind,omitempty"` + Metadata *struct { + Name string `json:"name"` + Annotations map[string]string `json:"annotations"` + } `json:"metadata,omitempty"` +} + +var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") + +// SplitManifests takes a string of manifest and returns a map contains individual manifests +func SplitManifests(bigFile string) map[string]string { + // Basically, we're quickly splitting a stream of YAML documents into an + // array of YAML docs. The file name is just a place holder, but should be + // integer-sortable so that manifests get output in the same order as the + // input (see `BySplitManifestsOrder`). + tpl := "manifest-%d" + res := map[string]string{} + // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly. 
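sortManifestsByKind, sortHooksByKind, and lessByKind are unexported, so the ordering rule is easiest to see in isolation. Below is a standalone sketch of the same rule using only the exported InstallOrder variable: known kinds sort by their rank in the list, unknown kinds sort last and alphabetically among themselves.

    package main

    import (
        "fmt"
        "sort"

        "helm.sh/helm/v3/pkg/releaseutil"
    )

    func main() {
        kinds := []string{"Deployment", "Namespace", "FooCustomKind", "ConfigMap", "BarThing"}
        rank := make(map[string]int, len(releaseutil.InstallOrder))
        for i, k := range releaseutil.InstallOrder {
            rank[k] = i
        }
        sort.SliceStable(kinds, func(i, j int) bool {
            ri, iok := rank[kinds[i]]
            rj, jok := rank[kinds[j]]
            if !iok && !jok {
                return kinds[i] < kinds[j] // both unknown: alphabetical
            }
            if !iok {
                return false // unknown kinds sort last
            }
            if !jok {
                return true
            }
            return ri < rj // both known: list order
        })
        fmt.Println(kinds) // [Namespace ConfigMap Deployment BarThing FooCustomKind]
    }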
+ bigFileTmp := strings.TrimSpace(bigFile) + docs := sep.Split(bigFileTmp, -1) + var count int + for _, d := range docs { + if d == "" { + continue + } + + d = strings.TrimSpace(d) + res[fmt.Sprintf(tpl, count)] = d + count = count + 1 + } + return res +} + +// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests` +type BySplitManifestsOrder []string + +func (a BySplitManifestsOrder) Len() int { return len(a) } +func (a BySplitManifestsOrder) Less(i, j int) bool { + // Split `manifest-%d` + anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0) + bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0) + return anum < bnum +} +func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go new file mode 100644 index 000000000000..cf851baa8187 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go @@ -0,0 +1,244 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil + +import ( + "log" + "path" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +// Manifest represents a manifest file, which has a name and some content. +type Manifest struct { + Name string + Content string + Head *SimpleHead +} + +// manifestFile represents a file that contains a manifest. +type manifestFile struct { + entries map[string]string + path string +} + +// result is an intermediate structure used during sorting. +type result struct { + hooks []*release.Hook + generic []Manifest +} + +// TODO: Refactor this out. It's here because naming conventions were not followed through. +// So fix the Test hook names and then remove this. +var events = map[string]release.HookEvent{ + release.HookPreInstall.String(): release.HookPreInstall, + release.HookPostInstall.String(): release.HookPostInstall, + release.HookPreDelete.String(): release.HookPreDelete, + release.HookPostDelete.String(): release.HookPostDelete, + release.HookPreUpgrade.String(): release.HookPreUpgrade, + release.HookPostUpgrade.String(): release.HookPostUpgrade, + release.HookPreRollback.String(): release.HookPreRollback, + release.HookPostRollback.String(): release.HookPostRollback, + release.HookTest.String(): release.HookTest, + // Support test-success for backward compatibility with Helm 2 tests + "test-success": release.HookTest, +} + +// SortManifests takes a map of filename/YAML contents, splits the file +// by manifest entries, and sorts the entries into hook types. +// +// The resulting hooks struct will be populated with all of the generated hooks. +// Any file that does not declare one of the hook types will be placed in the +// 'generic' bucket. +// +// Files that do not parse into the expected format are simply placed into a map and +// returned. 
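A small sketch of SplitManifests together with BySplitManifestsOrder, which recovers the original in-file document order from the generated "manifest-%d" map keys:

    package main

    import (
        "fmt"
        "sort"

        "helm.sh/helm/v3/pkg/releaseutil"
    )

    func main() {
        stream := `apiVersion: v1
    kind: ConfigMap
    ---
    apiVersion: v1
    kind: Secret`
        docs := releaseutil.SplitManifests(stream)
        keys := make([]string, 0, len(docs))
        for k := range docs {
            keys = append(keys, k)
        }
        // Restore in-file order: "manifest-0", "manifest-1", ...
        sort.Sort(releaseutil.BySplitManifestsOrder(keys))
        for _, k := range keys {
            fmt.Printf("%s:\n%s\n", k, docs[k])
        }
    }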
+func SortManifests(files map[string]string, _ chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) { + result := &result{} + + var sortedFilePaths []string + for filePath := range files { + sortedFilePaths = append(sortedFilePaths, filePath) + } + sort.Strings(sortedFilePaths) + + for _, filePath := range sortedFilePaths { + content := files[filePath] + + // Skip partials. We could return these as a separate map, but there doesn't + // seem to be any need for that at this time. + if strings.HasPrefix(path.Base(filePath), "_") { + continue + } + // Skip empty files and log this. + if strings.TrimSpace(content) == "" { + continue + } + + manifestFile := &manifestFile{ + entries: SplitManifests(content), + path: filePath, + } + + if err := manifestFile.sort(result); err != nil { + return result.hooks, result.generic, err + } + } + + return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil +} + +// sort takes a manifestFile object which may contain multiple resource definition +// entries and sorts each entry by hook types, and saves the resulting hooks and +// generic manifests (or non-hooks) to the result struct. +// +// To determine hook type, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook: pre-install +// +// To determine the policy to delete the hook, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook-delete-policy: hook-succeeded +// +// To determine the policy to output logs of the hook (for Pod and Job only), it looks for a YAML structure like this: +// +// kind: Pod +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook-output-log-policy: hook-succeeded,hook-failed +func (file *manifestFile) sort(result *result) error { + // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys) + var sortedEntryKeys []string + for entryKey := range file.entries { + sortedEntryKeys = append(sortedEntryKeys, entryKey) + } + sort.Sort(BySplitManifestsOrder(sortedEntryKeys)) + + for _, entryKey := range sortedEntryKeys { + m := file.entries[entryKey] + + var entry SimpleHead + if err := yaml.Unmarshal([]byte(m), &entry); err != nil { + return errors.Wrapf(err, "YAML parse error on %s", file.path) + } + + if !hasAnyAnnotation(entry) { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation] + if !ok { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hw := calculateHookWeight(entry) + + h := &release.Hook{ + Name: entry.Metadata.Name, + Kind: entry.Kind, + Path: file.path, + Manifest: m, + Events: []release.HookEvent{}, + Weight: hw, + DeletePolicies: []release.HookDeletePolicy{}, + OutputLogPolicies: []release.HookOutputLogPolicy{}, + } + + isUnknownHook := false + for _, hookType := range strings.Split(hookTypes, ",") { + hookType = strings.ToLower(strings.TrimSpace(hookType)) + e, ok := events[hookType] + if !ok { + isUnknownHook = true + break + } + h.Events = append(h.Events, e) + } + + if isUnknownHook { + log.Printf("info: skipping unknown hook: %q", hookTypes) + continue + } + + result.hooks = append(result.hooks, h) + + operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) 
{ + h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value)) + }) + + operateAnnotationValues(entry, release.HookOutputLogAnnotation, func(value string) { + h.OutputLogPolicies = append(h.OutputLogPolicies, release.HookOutputLogPolicy(value)) + }) + } + + return nil +} + +// hasAnyAnnotation returns true if the given entry has any annotations at all. +func hasAnyAnnotation(entry SimpleHead) bool { + return entry.Metadata != nil && + entry.Metadata.Annotations != nil && + len(entry.Metadata.Annotations) != 0 +} + +// calculateHookWeight finds the weight in the hook weight annotation. +// +// If no weight is found, the assigned weight is 0 +func calculateHookWeight(entry SimpleHead) int { + hws := entry.Metadata.Annotations[release.HookWeightAnnotation] + hw, err := strconv.Atoi(hws) + if err != nil { + hw = 0 + } + return hw +} + +// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation +func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) { + if dps, ok := entry.Metadata.Annotations[annotation]; ok { + for _, dp := range strings.Split(dps, ",") { + dp = strings.ToLower(strings.TrimSpace(dp)) + operate(dp) + } + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go new file mode 100644 index 000000000000..1a8aa78a6218 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go @@ -0,0 +1,78 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil" + +import ( + "sort" + + rspb "helm.sh/helm/v3/pkg/release" +) + +type list []*rspb.Release + +func (s list) Len() int { return len(s) } +func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// ByName sorts releases by name +type ByName struct{ list } + +// Less compares to releases +func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name } + +// ByDate sorts releases by date +type ByDate struct{ list } + +// Less compares to releases +func (s ByDate) Less(i, j int) bool { + ti := s.list[i].Info.LastDeployed.Unix() + tj := s.list[j].Info.LastDeployed.Unix() + return ti < tj +} + +// ByRevision sorts releases by revision number +type ByRevision struct{ list } + +// Less compares to releases +func (s ByRevision) Less(i, j int) bool { + return s.list[i].Version < s.list[j].Version +} + +// Reverse reverses the list of releases sorted by the sort func. +func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { + sortFn(list) + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +// SortByName returns the list of releases sorted +// in lexicographical order. +func SortByName(list []*rspb.Release) { + sort.Sort(ByName{list}) +} + +// SortByDate returns the list of releases sorted by a +// release's last deployed time (in seconds). 
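Putting manifest_sorter.go together, a hedged usage sketch: the VersionSet parameter is ignored by this implementation (note the blank identifier in the signature), so nil is passed; the file names and YAML bodies are hypothetical.

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/releaseutil"
    )

    func main() {
        files := map[string]string{
            "templates/test.yaml": `apiVersion: batch/v1
    kind: Job
    metadata:
      name: smoke-test
      annotations:
        "helm.sh/hook": test
        "helm.sh/hook-weight": "5"
    `,
            "templates/cm.yaml": `apiVersion: v1
    kind: ConfigMap
    metadata:
      name: app-config
    `,
        }
        hooks, manifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(hooks), "hook(s);", len(manifests), "generic manifest(s)")
        if len(hooks) > 0 {
            fmt.Println(hooks[0].Name, hooks[0].Weight) // smoke-test 5
        }
    }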
+func SortByDate(list []*rspb.Release) { + sort.Sort(ByDate{list}) +} + +// SortByRevision returns the list of releases sorted by a +// release's revision number (release.Version). +func SortByRevision(list []*rspb.Release) { + sort.Sort(ByRevision{list}) +} diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go new file mode 100644 index 000000000000..970e96da2d06 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go @@ -0,0 +1,317 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo // import "helm.sh/helm/v3/pkg/repo" + +import ( + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" +) + +// Entry represents a collection of parameters for chart repository +type Entry struct { + Name string `json:"name"` + URL string `json:"url"` + Username string `json:"username"` + Password string `json:"password"` + CertFile string `json:"certFile"` + KeyFile string `json:"keyFile"` + CAFile string `json:"caFile"` + InsecureSkipTLSverify bool `json:"insecure_skip_tls_verify"` + PassCredentialsAll bool `json:"pass_credentials_all"` +} + +// ChartRepository represents a chart repository +type ChartRepository struct { + Config *Entry + ChartPaths []string + IndexFile *IndexFile + Client getter.Getter + CachePath string +} + +// NewChartRepository constructs ChartRepository +func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) { + u, err := url.Parse(cfg.URL) + if err != nil { + return nil, errors.Errorf("invalid chart URL format: %s", cfg.URL) + } + + client, err := getters.ByScheme(u.Scheme) + if err != nil { + return nil, errors.Errorf("could not find protocol handler for: %s", u.Scheme) + } + + return &ChartRepository{ + Config: cfg, + IndexFile: NewIndexFile(), + Client: client, + CachePath: helmpath.CachePath("repository"), + }, nil +} + +// Load loads a directory of charts as if it were a repository. +// +// It requires the presence of an index.yaml file in the directory. +// +// Deprecated: remove in Helm 4. +func (r *ChartRepository) Load() error { + dirInfo, err := os.Stat(r.Config.Name) + if err != nil { + return err + } + if !dirInfo.IsDir() { + return errors.Errorf("%q is not a directory", r.Config.Name) + } + + // FIXME: Why are we recursively walking directories? + // FIXME: Why are we not reading the repositories.yaml to figure out + // what repos to use? 
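Reverse runs the supplied sort and then flips the slice in place, so a newest-first listing is a single call:

    package main

    import (
        "fmt"

        rspb "helm.sh/helm/v3/pkg/release"
        "helm.sh/helm/v3/pkg/releaseutil"
    )

    func main() {
        releases := []*rspb.Release{
            {Name: "web", Version: 2},
            {Name: "web", Version: 3},
            {Name: "web", Version: 1},
        }
        // Sort ascending by revision, then reverse in place: newest first.
        releaseutil.Reverse(releases, releaseutil.SortByRevision)
        for _, r := range releases {
            fmt.Println(r.Name, r.Version) // 3, then 2, then 1
        }
    }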
+ filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, _ error) error { + if !f.IsDir() { + if strings.Contains(f.Name(), "-index.yaml") { + i, err := LoadIndexFile(path) + if err != nil { + return err + } + r.IndexFile = i + } else if strings.HasSuffix(f.Name(), ".tgz") { + r.ChartPaths = append(r.ChartPaths, path) + } + } + return nil + }) + return nil +} + +// DownloadIndexFile fetches the index from a repository. +func (r *ChartRepository) DownloadIndexFile() (string, error) { + indexURL, err := ResolveReferenceURL(r.Config.URL, "index.yaml") + if err != nil { + return "", err + } + + resp, err := r.Client.Get(indexURL, + getter.WithURL(r.Config.URL), + getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify), + getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile), + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) + if err != nil { + return "", err + } + + index, err := io.ReadAll(resp) + if err != nil { + return "", err + } + + indexFile, err := loadIndex(index, r.Config.URL) + if err != nil { + return "", err + } + + // Create the chart list file in the cache directory + var charts strings.Builder + for name := range indexFile.Entries { + fmt.Fprintln(&charts, name) + } + chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)) + os.MkdirAll(filepath.Dir(chartsFile), 0755) + os.WriteFile(chartsFile, []byte(charts.String()), 0644) + + // Create the index file in the cache directory + fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name)) + os.MkdirAll(filepath.Dir(fname), 0755) + return fname, os.WriteFile(fname, index, 0644) +} + +// Index generates an index for the chart repository and writes an index.yaml file. +func (r *ChartRepository) Index() error { + err := r.generateIndex() + if err != nil { + return err + } + return r.saveIndexFile() +} + +func (r *ChartRepository) saveIndexFile() error { + index, err := yaml.Marshal(r.IndexFile) + if err != nil { + return err + } + return os.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644) +} + +func (r *ChartRepository) generateIndex() error { + for _, path := range r.ChartPaths { + ch, err := loader.Load(path) + if err != nil { + return err + } + + digest, err := provenance.DigestFile(path) + if err != nil { + return err + } + + if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) { + if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil { + return errors.Wrapf(err, "failed adding to %s to index", path) + } + } + // TODO: If a chart exists, but has a different Digest, should we error? + } + r.IndexFile.SortEntries() + return nil +} + +// FindChartInRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories +func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) +} + +// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials for the chart repository. 
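A sketch of fetching and caching a repository index. getter.All and cli.New come from the same Helm module but are not part of this diff, so treat them as assumptions; the repository entry is hypothetical and DownloadIndexFile performs a real HTTP GET.

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/cli"
        "helm.sh/helm/v3/pkg/getter"
        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        entry := &repo.Entry{
            Name: "stable",                     // hypothetical repo name
            URL:  "https://charts.example.com", // hypothetical repo URL
        }
        r, err := repo.NewChartRepository(entry, getter.All(cli.New()))
        if err != nil {
            log.Fatal(err)
        }
        idxPath, err := r.DownloadIndexFile()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("cached index at:", idxPath)
    }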
+func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, getters) +} + +// FindChartInAuthAndTLSRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials and TLS verify flag for the chart repository. +// TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL. +func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, insecureSkipTLSverify, false, getters) +} + +// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials, TLS verify flag, and if credentials should +// be passed on to other domains. +// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL. +func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) { + + // Download and write the index file to a temporary location + buf := make([]byte, 20) + rand.Read(buf) + name := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), "/", "-") + + c := Entry{ + URL: repoURL, + Username: username, + Password: password, + PassCredentialsAll: passCredentialsAll, + CertFile: certFile, + KeyFile: keyFile, + CAFile: caFile, + Name: name, + InsecureSkipTLSverify: insecureSkipTLSverify, + } + r, err := NewChartRepository(&c, getters) + if err != nil { + return "", err + } + idx, err := r.DownloadIndexFile() + if err != nil { + return "", errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", repoURL) + } + defer func() { + os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))) + os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))) + }() + + // Read the index file for the repository to get chart information and return chart URL + repoIndex, err := LoadIndexFile(idx) + if err != nil { + return "", err + } + + errMsg := fmt.Sprintf("chart %q", chartName) + if chartVersion != "" { + errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion) + } + cv, err := repoIndex.Get(chartName, chartVersion) + if err != nil { + return "", errors.Errorf("%s not found in %s repository", errMsg, repoURL) + } + + if len(cv.URLs) == 0 { + return "", errors.Errorf("%s has no downloadable URLs", errMsg) + } + + chartURL := cv.URLs[0] + + absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL) + if err != nil { + return "", errors.Wrap(err, "failed to make chart URL absolute") + } + + return absoluteChartURL, nil +} + +// ResolveReferenceURL resolves refURL relative to baseURL. +// If refURL is absolute, it simply returns refURL. 
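The Find* wrappers each thread one extra parameter through to FindChartInAuthAndTLSAndPassRepoURL; the common entry point resolves a name/version pair to an absolute chart URL. A hedged sketch with a hypothetical repository (getter.All and cli.New are again assumptions from the wider Helm module):

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/cli"
        "helm.sh/helm/v3/pkg/getter"
        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        chartURL, err := repo.FindChartInRepoURL(
            "https://charts.example.com", // repository URL
            "mychart",                    // chart name
            "1.2.3",                      // version; "" selects latest stable
            "", "", "",                   // certFile, keyFile, caFile
            getter.All(cli.New()),
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(chartURL)
    }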
+func ResolveReferenceURL(baseURL, refURL string) (string, error) { + parsedRefURL, err := url.Parse(refURL) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %s as URL", refURL) + } + + if parsedRefURL.IsAbs() { + return refURL, nil + } + + parsedBaseURL, err := url.Parse(baseURL) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL) + } + + // We need a trailing slash for ResolveReference to work, but make sure there isn't already one + parsedBaseURL.RawPath = strings.TrimSuffix(parsedBaseURL.RawPath, "/") + "/" + parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/" + + resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL) + resolvedURL.RawQuery = parsedBaseURL.RawQuery + return resolvedURL.String(), nil +} + +func (e *Entry) String() string { + buf, err := json.Marshal(e) + if err != nil { + log.Panic(err) + } + return string(buf) +} diff --git a/vendor/helm.sh/helm/v3/pkg/repo/doc.go b/vendor/helm.sh/helm/v3/pkg/repo/doc.go new file mode 100644 index 000000000000..fc54bbf7afc6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/doc.go @@ -0,0 +1,94 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package repo implements the Helm Chart Repository. + +A chart repository is an HTTP server that provides information on charts. A local +repository cache is an on-disk representation of a chart repository. + +There are two important file formats for chart repositories. + +The first is the 'index.yaml' format, which is expressed like this: + + apiVersion: v1 + entries: + frobnitz: + - created: 2016-09-29T12:14:34.830161306-06:00 + description: This is a frobnitz. + digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: frobnitz + urls: + - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz + version: 1.2.3 + sprocket: + - created: 2016-09-29T12:14:34.830507606-06:00 + description: This is a sprocket" + digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: sprocket + urls: + - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz + version: 1.2.0 + generated: 2016-09-29T12:14:34.829721375-06:00 + +An index.yaml file contains the necessary descriptive information about what +charts are available in a repository, and how to get them. + +The second file format is the repositories.yaml file format. This file is for +facilitating local cached copies of one or more chart repositories. 
+ +The format of a repository.yaml file is: + + apiVersion: v1 + generated: TIMESTAMP + repositories: + - name: stable + url: http://example.com/charts + cache: stable-index.yaml + - name: incubator + url: http://example.com/incubator + cache: incubator-index.yaml + +This file maps three bits of information about a repository: + + - The name the user uses to refer to it + - The fully qualified URL to the repository (index.yaml will be appended) + - The name of the local cachefile + +The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not +backwards compatible with those earlier versions. +*/ +package repo diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go new file mode 100644 index 000000000000..e1ce3c62dd20 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go @@ -0,0 +1,416 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo + +import ( + "bytes" + "encoding/json" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/internal/fileutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/provenance" +) + +var indexPath = "index.yaml" + +// APIVersionV1 is the v1 API version for index and repository files. +const APIVersionV1 = "v1" + +var ( + // ErrNoAPIVersion indicates that an API version was not specified. + ErrNoAPIVersion = errors.New("no API version specified") + // ErrNoChartVersion indicates that a chart with the given version is not found. + ErrNoChartVersion = errors.New("no chart version found") + // ErrNoChartName indicates that a chart with the given name is not found. + ErrNoChartName = errors.New("no chart name found") + // ErrEmptyIndexYaml indicates that the content of index.yaml is empty. + ErrEmptyIndexYaml = errors.New("empty index.yaml file") +) + +// ChartVersions is a list of versioned chart references. +// Implements a sorter on Version. +type ChartVersions []*ChartVersion + +// Len returns the length. +func (c ChartVersions) Len() int { return len(c) } + +// Swap swaps the position of two items in the versions slice. +func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Less returns true if the version of entry a is less than the version of entry b. +func (c ChartVersions) Less(a, b int) bool { + // Failed parse pushes to the back. + i, err := semver.NewVersion(c[a].Version) + if err != nil { + return true + } + j, err := semver.NewVersion(c[b].Version) + if err != nil { + return false + } + return i.LessThan(j) +} + +// IndexFile represents the index file in a chart repository +type IndexFile struct { + // This is used ONLY for validation against chartmuseum's index files and is discarded after validation. 
+ ServerInfo map[string]interface{} `json:"serverInfo,omitempty"` + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Entries map[string]ChartVersions `json:"entries"` + PublicKeys []string `json:"publicKeys,omitempty"` + + // Annotations are additional mappings uninterpreted by Helm. They are made available for + // other applications to add information to the index file. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// NewIndexFile initializes an index. +func NewIndexFile() *IndexFile { + return &IndexFile{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Entries: map[string]ChartVersions{}, + PublicKeys: []string{}, + } +} + +// LoadIndexFile takes a file at the given path and returns an IndexFile object +func LoadIndexFile(path string) (*IndexFile, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + i, err := loadIndex(b, path) + if err != nil { + return nil, errors.Wrapf(err, "error loading %s", path) + } + return i, nil +} + +// MustAdd adds a file to the index +// This can leave the index in an unsorted state +func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error { + if i.Entries == nil { + return errors.New("entries not initialized") + } + + if md.APIVersion == "" { + md.APIVersion = chart.APIVersionV1 + } + if err := md.Validate(); err != nil { + return errors.Wrapf(err, "validate failed for %s", filename) + } + + u := filename + if baseURL != "" { + _, file := filepath.Split(filename) + var err error + u, err = urlutil.URLJoin(baseURL, file) + if err != nil { + u = path.Join(baseURL, file) + } + } + cr := &ChartVersion{ + URLs: []string{u}, + Metadata: md, + Digest: digest, + Created: time.Now(), + } + ee := i.Entries[md.Name] + i.Entries[md.Name] = append(ee, cr) + return nil +} + +// Add adds a file to the index and logs an error. +// +// Deprecated: Use index.MustAdd instead. +func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) { + if err := i.MustAdd(md, filename, baseURL, digest); err != nil { + log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err) + } +} + +// Has returns true if the index has an entry for a chart with the given name and exact version. +func (i IndexFile) Has(name, version string) bool { + _, err := i.Get(name, version) + return err == nil +} + +// SortEntries sorts the entries by version in descending order. +// +// In canonical form, the individual version records should be sorted so that +// the most recent release for every version is in the 0th slot in the +// Entries.ChartVersions array. That way, tooling can predict the newest +// version without needing to parse SemVers. +func (i IndexFile) SortEntries() { + for _, versions := range i.Entries { + sort.Sort(sort.Reverse(versions)) + } +} + +// Get returns the ChartVersion for the given name. +// +// If version is empty, this will return the chart with the latest stable version, +// prerelease versions will be skipped. 
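A sketch of building an index in memory with MustAdd, which validates the chart metadata and appends an entry without sorting. chart.APIVersionV2 and the digest value are assumptions made for illustration:

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/chart"
        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        idx := repo.NewIndexFile()
        md := &chart.Metadata{
            APIVersion: chart.APIVersionV2,
            Name:       "mychart",
            Version:    "1.2.3",
        }
        // The digest is a hypothetical placeholder value.
        if err := idx.MustAdd(md, "mychart-1.2.3.tgz", "https://charts.example.com", "sha256:0000"); err != nil {
            log.Fatal(err)
        }
        idx.SortEntries()
        fmt.Println(idx.Has("mychart", "1.2.3")) // true
    }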
+func (i IndexFile) Get(name, version string) (*ChartVersion, error) { + vs, ok := i.Entries[name] + if !ok { + return nil, ErrNoChartName + } + if len(vs) == 0 { + return nil, ErrNoChartVersion + } + + var constraint *semver.Constraints + if version == "" { + constraint, _ = semver.NewConstraint("*") + } else { + var err error + constraint, err = semver.NewConstraint(version) + if err != nil { + return nil, err + } + } + + // when customer inputs specific version, check whether there's an exact match first + if len(version) != 0 { + for _, ver := range vs { + if version == ver.Version { + return ver, nil + } + } + } + + for _, ver := range vs { + test, err := semver.NewVersion(ver.Version) + if err != nil { + continue + } + + if constraint.Check(test) { + return ver, nil + } + } + return nil, errors.Errorf("no chart version found for %s-%s", name, version) +} + +// WriteFile writes an index file to the given destination path. +// +// The mode on the file is set to 'mode'. +func (i IndexFile) WriteFile(dest string, mode os.FileMode) error { + b, err := yaml.Marshal(i) + if err != nil { + return err + } + return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode) +} + +// WriteJSONFile writes an index file in JSON format to the given destination +// path. +// +// The mode on the file is set to 'mode'. +func (i IndexFile) WriteJSONFile(dest string, mode os.FileMode) error { + b, err := json.MarshalIndent(i, "", " ") + if err != nil { + return err + } + return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode) +} + +// Merge merges the given index file into this index. +// +// This merges by name and version. +// +// If one of the entries in the given index does _not_ already exist, it is added. +// In all other cases, the existing record is preserved. +// +// This can leave the index in an unsorted state +func (i *IndexFile) Merge(f *IndexFile) { + for _, cvs := range f.Entries { + for _, cv := range cvs { + if !i.Has(cv.Name, cv.Version) { + e := i.Entries[cv.Name] + i.Entries[cv.Name] = append(e, cv) + } + } + } +} + +// ChartVersion represents a chart entry in the IndexFile +type ChartVersion struct { + *chart.Metadata + URLs []string `json:"urls"` + Created time.Time `json:"created,omitempty"` + Removed bool `json:"removed,omitempty"` + Digest string `json:"digest,omitempty"` + + // ChecksumDeprecated is deprecated in Helm 3, and therefore ignored. Helm 3 replaced + // this with Digest. However, with a strict YAML parser enabled, a field must be + // present on the struct for backwards compatibility. + ChecksumDeprecated string `json:"checksum,omitempty"` + + // EngineDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict + // YAML parser enabled, this field must be present. + EngineDeprecated string `json:"engine,omitempty"` + + // TillerVersionDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict + // YAML parser enabled, this field must be present. + TillerVersionDeprecated string `json:"tillerVersion,omitempty"` + + // URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However, + // with a strict YAML parser enabled, this must be present on the struct. + URLDeprecated string `json:"url,omitempty"` +} + +// IndexDirectory reads a (flat) directory and generates an index. +// +// It indexes only charts that have been packaged (*.tgz). 
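Get's two-phase lookup (exact match first, then semver constraint) means callers can pass either a pinned version or a range; a brief sketch assuming an index.yaml on disk:

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        idx, err := repo.LoadIndexFile("index.yaml") // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        // An exact version is matched first; otherwise the argument is
        // parsed as a semver constraint, e.g. "^1.2.0" or ">=1.0.0 <2.0.0".
        cv, err := idx.Get("mychart", "^1.2.0")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(cv.Version, cv.URLs[0])
    }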
+// +// The index returned will be in an unsorted state +func IndexDirectory(dir, baseURL string) (*IndexFile, error) { + archives, err := filepath.Glob(filepath.Join(dir, "*.tgz")) + if err != nil { + return nil, err + } + moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz")) + if err != nil { + return nil, err + } + archives = append(archives, moreArchives...) + + index := NewIndexFile() + for _, arch := range archives { + fname, err := filepath.Rel(dir, arch) + if err != nil { + return index, err + } + + var parentDir string + parentDir, fname = filepath.Split(fname) + // filepath.Split appends an extra slash to the end of parentDir. We want to strip that out. + parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator)) + parentURL, err := urlutil.URLJoin(baseURL, parentDir) + if err != nil { + parentURL = path.Join(baseURL, parentDir) + } + + c, err := loader.Load(arch) + if err != nil { + // Assume this is not a chart. + continue + } + hash, err := provenance.DigestFile(arch) + if err != nil { + return index, err + } + if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil { + return index, errors.Wrapf(err, "failed adding to %s to index", fname) + } + } + return index, nil +} + +// loadIndex loads an index file and does minimal validity checking. +// +// The source parameter is only used for logging. +// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails. +func loadIndex(data []byte, source string) (*IndexFile, error) { + i := &IndexFile{} + + if len(data) == 0 { + return i, ErrEmptyIndexYaml + } + + if err := jsonOrYamlUnmarshal(data, i); err != nil { + return i, err + } + + for name, cvs := range i.Entries { + for idx := len(cvs) - 1; idx >= 0; idx-- { + if cvs[idx] == nil { + log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source) + continue + } + // When metadata section missing, initialize with no data + if cvs[idx].Metadata == nil { + cvs[idx].Metadata = &chart.Metadata{} + } + if cvs[idx].APIVersion == "" { + cvs[idx].APIVersion = chart.APIVersionV1 + } + if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil { + log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err) + cvs = append(cvs[:idx], cvs[idx+1:]...) + } + } + // adjust slice to only contain a set of valid versions + i.Entries[name] = cvs + } + i.SortEntries() + if i.APIVersion == "" { + return i, ErrNoAPIVersion + } + return i, nil +} + +// jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML +// into the provided interface. +// +// It automatically detects whether the data is in JSON or YAML format by +// checking its validity as JSON. If the data is valid JSON, it will use the +// `encoding/json` package to unmarshal it. Otherwise, it will use the +// `sigs.k8s.io/yaml` package to unmarshal the YAML data. +func jsonOrYamlUnmarshal(b []byte, i interface{}) error { + if json.Valid(b) { + return json.Unmarshal(b, i) + } + return yaml.UnmarshalStrict(b, i) +} + +// ignoreSkippableChartValidationError inspect the given error and returns nil if +// the error isn't important for index loading +// +// In particular, charts may introduce validations that don't impact repository indexes +// And repository indexes may be generated by older/non-compliant software, which doesn't +// conform to all validations. 
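A sketch of generating a repository index from a directory of packaged charts; the directory and base URL are illustrative, and SortEntries is called explicitly because IndexDirectory returns an unsorted index:

    package main

    import (
        "log"

        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        // Index all *.tgz archives under ./charts (including one
        // subdirectory level, per the two Glob patterns above).
        idx, err := repo.IndexDirectory("./charts", "https://charts.example.com")
        if err != nil {
            log.Fatal(err)
        }
        idx.SortEntries()
        if err := idx.WriteFile("./charts/index.yaml", 0644); err != nil {
            log.Fatal(err)
        }
    }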
+func ignoreSkippableChartValidationError(err error) error { + verr, ok := err.(chart.ValidationError) + if !ok { + return err + } + + // https://github.com/helm/helm/issues/12748 (JFrog repository strips alias field) + if strings.HasPrefix(verr.Error(), "validation: more than one dependency with name or alias") { + return nil + } + + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/repo/repo.go b/vendor/helm.sh/helm/v3/pkg/repo/repo.go new file mode 100644 index 000000000000..834d554bd93e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/repo.go @@ -0,0 +1,125 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo // import "helm.sh/helm/v3/pkg/repo" + +import ( + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +// File represents the repositories.yaml file +type File struct { + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Repositories []*Entry `json:"repositories"` +} + +// NewFile generates an empty repositories file. +// +// Generated and APIVersion are automatically set. +func NewFile() *File { + return &File{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Repositories: []*Entry{}, + } +} + +// LoadFile takes a file at the given path and returns a File object +func LoadFile(path string) (*File, error) { + r := new(File) + b, err := os.ReadFile(path) + if err != nil { + return r, errors.Wrapf(err, "couldn't load repositories file (%s)", path) + } + + err = yaml.Unmarshal(b, r) + return r, err +} + +// Add adds one or more repo entries to a repo file. +func (r *File) Add(re ...*Entry) { + r.Repositories = append(r.Repositories, re...) +} + +// Update attempts to replace one or more repo entries in a repo file. If an +// entry with the same name doesn't exist in the repo file it will add it. +func (r *File) Update(re ...*Entry) { + for _, target := range re { + r.update(target) + } +} + +func (r *File) update(e *Entry) { + for j, repo := range r.Repositories { + if repo.Name == e.Name { + r.Repositories[j] = e + return + } + } + r.Add(e) +} + +// Has returns true if the given name is already a repository name. +func (r *File) Has(name string) bool { + entry := r.Get(name) + return entry != nil +} + +// Get returns an entry with the given name if it exists, otherwise returns nil +func (r *File) Get(name string) *Entry { + for _, entry := range r.Repositories { + if entry.Name == name { + return entry + } + } + return nil +} + +// Remove removes the entry from the list of repositories. +func (r *File) Remove(name string) bool { + cp := []*Entry{} + found := false + for _, rf := range r.Repositories { + if rf == nil { + continue + } + if rf.Name == name { + found = true + continue + } + cp = append(cp, rf) + } + r.Repositories = cp + return found +} + +// WriteFile writes a repositories file to the given path. 
+func (r *File) WriteFile(path string, perm os.FileMode) error { + data, err := yaml.Marshal(r) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return os.WriteFile(path, data, perm) +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go new file mode 100644 index 000000000000..ce88c662b0ff --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go @@ -0,0 +1,264 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kblabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var _ Driver = (*ConfigMaps)(nil) + +// ConfigMapsDriverName is the string name of the driver. +const ConfigMapsDriverName = "ConfigMap" + +// ConfigMaps is a wrapper around an implementation of a kubernetes +// ConfigMapsInterface. +type ConfigMaps struct { + impl corev1.ConfigMapInterface + Log func(string, ...interface{}) +} + +// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of +// the kubernetes ConfigMapsInterface. +func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps { + return &ConfigMaps{ + impl: impl, + Log: func(_ string, _ ...interface{}) {}, + } +} + +// Name returns the name of the driver. +func (cfgmaps *ConfigMaps) Name() string { + return ConfigMapsDriverName +} + +// Get fetches the release named by key. The corresponding release is returned +// or error if not found. +func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) { + // fetch the configmap holding the release named by key + obj, err := cfgmaps.impl.Get(context.Background(), key, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, ErrReleaseNotFound + } + + cfgmaps.Log("get: failed to get %q: %s", key, err) + return nil, err + } + // found the configmap, decode the base64 data string + r, err := decodeRelease(obj.Data["release"]) + if err != nil { + cfgmaps.Log("get: failed to decode data %q: %s", key, err) + return nil, err + } + r.Labels = filterSystemLabels(obj.ObjectMeta.Labels) + // return the release object + return r, nil +} + +// List fetches all releases and returns the list releases such +// that filter(release) == true. An error is returned if the +// configmap fails to retrieve the releases. 
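The File type round-trips repositories.yaml; a short sketch exercising Add, Update (replace-by-name, or append when the name is unknown), Has, and WriteFile:

    package main

    import (
        "log"

        "helm.sh/helm/v3/pkg/repo"
    )

    func main() {
        f := repo.NewFile()
        f.Add(&repo.Entry{Name: "stable", URL: "https://charts.example.com"})
        // Update replaces the entry with the same name in place.
        f.Update(&repo.Entry{Name: "stable", URL: "https://mirror.example.com"})
        if f.Has("stable") {
            if err := f.WriteFile("/tmp/repositories.yaml", 0644); err != nil {
                log.Fatal(err)
            }
        }
    }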
+func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + lsel := kblabels.Set{"owner": "helm"}.AsSelector() + opts := metav1.ListOptions{LabelSelector: lsel.String()} + + list, err := cfgmaps.impl.List(context.Background(), opts) + if err != nil { + cfgmaps.Log("list: failed to list: %s", err) + return nil, err + } + + var results []*rspb.Release + + // iterate over the configmaps object list + // and decode each release + for _, item := range list.Items { + rls, err := decodeRelease(item.Data["release"]) + if err != nil { + cfgmaps.Log("list: failed to decode release: %v: %s", item, err) + continue + } + + rls.Labels = item.ObjectMeta.Labels + + if filter(rls) { + results = append(results, rls) + } + } + return results, nil +} + +// Query fetches all releases that match the provided map of labels. +// An error is returned if the configmap fails to retrieve the releases. +func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, error) { + ls := kblabels.Set{} + for k, v := range labels { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + ls[k] = v + } + + opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()} + + list, err := cfgmaps.impl.List(context.Background(), opts) + if err != nil { + cfgmaps.Log("query: failed to query with labels: %s", err) + return nil, err + } + + if len(list.Items) == 0 { + return nil, ErrReleaseNotFound + } + + var results []*rspb.Release + for _, item := range list.Items { + rls, err := decodeRelease(item.Data["release"]) + if err != nil { + cfgmaps.Log("query: failed to decode release: %s", err) + continue + } + rls.Labels = item.ObjectMeta.Labels + results = append(results, rls) + } + return results, nil +} + +// Create creates a new ConfigMap holding the release. If the +// ConfigMap already exists, ErrReleaseExists is returned. +func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { + // set labels for configmaps object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix())) + + // create a new configmap to hold the release + obj, err := newConfigMapsObject(key, rls, lbs) + if err != nil { + cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err) + return err + } + // push the configmap object out into the kubiverse + if _, err := cfgmaps.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil { + if apierrors.IsAlreadyExists(err) { + return ErrReleaseExists + } + + cfgmaps.Log("create: failed to create: %s", err) + return err + } + return nil +} + +// Update updates the ConfigMap holding the release. If not found +// the ConfigMap is created to hold the release. 
+func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error { + // set labels for configmaps object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix())) + + // create a new configmap object to hold the release + obj, err := newConfigMapsObject(key, rls, lbs) + if err != nil { + cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err) + return err + } + // push the configmap object out into the kubiverse + _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) + if err != nil { + cfgmaps.Log("update: failed to update: %s", err) + return err + } + return nil +} + +// Delete deletes the ConfigMap holding the release named by key. +func (cfgmaps *ConfigMaps) Delete(key string) (rls *rspb.Release, err error) { + // fetch the release to check existence + if rls, err = cfgmaps.Get(key); err != nil { + return nil, err + } + // delete the release + if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil { + return rls, err + } + return rls, nil +} + +// newConfigMapsObject constructs a kubernetes ConfigMap object +// to store a release. Each configmap data entry is the base64 +// encoded gzipped string of a release. +// +// The following labels are used within each configmap: +// +// "modifiedAt" - timestamp indicating when this configmap was last modified. (set in Update) +// "createdAt" - timestamp indicating when this configmap was created. (set in Create) +// "version" - version of the release. +// "status" - status of the release (see pkg/release/status.go for variants) +// "owner" - owner of the configmap, currently "helm". +// "name" - name of the release. +func newConfigMapsObject(key string, rls *rspb.Release, lbs labels) (*v1.ConfigMap, error) { + const owner = "helm" + + // encode the release + s, err := encodeRelease(rls) + if err != nil { + return nil, err + } + + if lbs == nil { + lbs.init() + } + + // apply custom labels + lbs.fromMap(rls.Labels) + + // apply labels + lbs.set("name", rls.Name) + lbs.set("owner", owner) + lbs.set("status", rls.Info.Status.String()) + lbs.set("version", strconv.Itoa(rls.Version)) + + // create and return configmap object + return &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: key, + Labels: lbs.toMap(), + }, + Data: map[string]string{"release": s}, + }, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go new file mode 100644 index 000000000000..9c01f376609f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go @@ -0,0 +1,105 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "fmt" + + "github.com/pkg/errors" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var ( + // ErrReleaseNotFound indicates that a release is not found. 
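A hedged sketch of the ConfigMaps driver against a fake clientset. k8s.io/client-go/kubernetes/fake is an assumption (it is not part of this diff), and encodeRelease/decodeRelease live elsewhere in the vendored driver package; the release fixture carries only the fields the label-setting code reads.

    package main

    import (
        "fmt"
        "log"

        "k8s.io/client-go/kubernetes/fake"

        rspb "helm.sh/helm/v3/pkg/release"
        "helm.sh/helm/v3/pkg/storage/driver"
    )

    func main() {
        clientset := fake.NewSimpleClientset()
        d := driver.NewConfigMaps(clientset.CoreV1().ConfigMaps("helm-test"))

        rls := &rspb.Release{
            Name:      "web",
            Version:   1,
            Namespace: "helm-test",
            Info:      &rspb.Info{Status: rspb.StatusDeployed},
        }
        key := "sh.helm.release.v1.web.v1" // Helm's storage key convention
        if err := d.Create(key, rls); err != nil {
            log.Fatal(err)
        }
        got, err := d.Get(key)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(got.Name, got.Info.Status) // web deployed
    }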
+ ErrReleaseNotFound = errors.New("release: not found") + // ErrReleaseExists indicates that a release already exists. + ErrReleaseExists = errors.New("release: already exists") + // ErrInvalidKey indicates that a release key could not be parsed. + ErrInvalidKey = errors.New("release: invalid key") + // ErrNoDeployedReleases indicates that there are no releases with the given key in the deployed state + ErrNoDeployedReleases = errors.New("has no deployed releases") +) + +// StorageDriverError records an error and the release name that caused it +type StorageDriverError struct { + ReleaseName string + Err error +} + +func (e *StorageDriverError) Error() string { + return fmt.Sprintf("%q %s", e.ReleaseName, e.Err.Error()) +} + +func (e *StorageDriverError) Unwrap() error { return e.Err } + +func NewErrNoDeployedReleases(releaseName string) error { + return &StorageDriverError{ + ReleaseName: releaseName, + Err: ErrNoDeployedReleases, + } +} + +// Creator is the interface that wraps the Create method. +// +// Create stores the release or returns ErrReleaseExists +// if an identical release already exists. +type Creator interface { + Create(key string, rls *rspb.Release) error +} + +// Updator is the interface that wraps the Update method. +// +// Update updates an existing release or returns +// ErrReleaseNotFound if the release does not exist. +type Updator interface { + Update(key string, rls *rspb.Release) error +} + +// Deletor is the interface that wraps the Delete method. +// +// Delete deletes the release named by key or returns +// ErrReleaseNotFound if the release does not exist. +type Deletor interface { + Delete(key string) (*rspb.Release, error) +} + +// Queryor is the interface that wraps the Get and List methods. +// +// Get returns the release named by key or returns ErrReleaseNotFound +// if the release does not exist. +// +// List returns the set of all releases that satisfy the filter predicate. +// +// Query returns the set of all releases that match the provided label set. +type Queryor interface { + Get(key string) (*rspb.Release, error) + List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) + Query(labels map[string]string) ([]*rspb.Release, error) +} + +// Driver is the interface composed of Creator, Updator, Deletor, and Queryor +// interfaces. It defines the behavior for storing, updating, deleted, +// and retrieving Helm releases from some underlying storage mechanism, +// e.g. memory, configmaps. +type Driver interface { + Creator + Updator + Deletor + Queryor + Name() string +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go new file mode 100644 index 000000000000..eb7118fe5fa6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go @@ -0,0 +1,48 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +// labels is a map of key value pairs to be included as metadata in a configmap object. 
+type labels map[string]string + +func (lbs *labels) init() { *lbs = labels(make(map[string]string)) } +func (lbs labels) get(key string) string { return lbs[key] } +func (lbs labels) set(key, val string) { lbs[key] = val } + +func (lbs labels) keys() (ls []string) { + for key := range lbs { + ls = append(ls, key) + } + return +} + +func (lbs labels) match(set labels) bool { + for _, key := range set.keys() { + if lbs.get(key) != set.get(key) { + return false + } + } + return true +} + +func (lbs labels) toMap() map[string]string { return lbs } + +func (lbs *labels) fromMap(kvs map[string]string) { + for k, v := range kvs { + lbs.set(k, v) + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go new file mode 100644 index 000000000000..91378f588528 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go @@ -0,0 +1,240 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "strconv" + "strings" + "sync" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var _ Driver = (*Memory)(nil) + +const ( + // MemoryDriverName is the string name of this driver. + MemoryDriverName = "Memory" + + defaultNamespace = "default" +) + +// A map of release names to list of release records +type memReleases map[string]records + +// Memory is the in-memory storage driver implementation. +type Memory struct { + sync.RWMutex + namespace string + // A map of namespaces to releases + cache map[string]memReleases +} + +// NewMemory initializes a new memory driver. +func NewMemory() *Memory { + return &Memory{cache: map[string]memReleases{}, namespace: "default"} +} + +// SetNamespace sets a specific namespace in which releases will be accessed. +// An empty string indicates all namespaces (for the list operation) +func (mem *Memory) SetNamespace(ns string) { + mem.namespace = ns +} + +// Name returns the name of the driver. +func (mem *Memory) Name() string { + return MemoryDriverName +} + +// Get returns the release named by key or returns ErrReleaseNotFound. 
+func (mem *Memory) Get(key string) (*rspb.Release, error) { + defer unlock(mem.rlock()) + + keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.") + switch elems := strings.Split(keyWithoutPrefix, ".v"); len(elems) { + case 2: + name, ver := elems[0], elems[1] + if _, err := strconv.Atoi(ver); err != nil { + return nil, ErrInvalidKey + } + if recs, ok := mem.cache[mem.namespace][name]; ok { + if r := recs.Get(key); r != nil { + return r.rls, nil + } + } + return nil, ErrReleaseNotFound + default: + return nil, ErrInvalidKey + } +} + +// List returns the list of all releases such that filter(release) == true +func (mem *Memory) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + defer unlock(mem.rlock()) + + var ls []*rspb.Release + for namespace := range mem.cache { + if mem.namespace != "" { + // Should only list releases of this namespace + namespace = mem.namespace + } + for _, recs := range mem.cache[namespace] { + recs.Iter(func(_ int, rec *record) bool { + if filter(rec.rls) { + ls = append(ls, rec.rls) + } + return true + }) + } + if mem.namespace != "" { + // Should only list releases of this namespace + break + } + } + return ls, nil +} + +// Query returns the set of releases that match the provided set of labels +func (mem *Memory) Query(keyvals map[string]string) ([]*rspb.Release, error) { + defer unlock(mem.rlock()) + + var lbs labels + + lbs.init() + lbs.fromMap(keyvals) + + var ls []*rspb.Release + for namespace := range mem.cache { + if mem.namespace != "" { + // Should only query releases of this namespace + namespace = mem.namespace + } + for _, recs := range mem.cache[namespace] { + recs.Iter(func(_ int, rec *record) bool { + // A query for a release name that doesn't exist (has been deleted) + // can cause rec to be nil. + if rec == nil { + return false + } + if rec.lbs.match(lbs) { + ls = append(ls, rec.rls) + } + return true + }) + } + if mem.namespace != "" { + // Should only query releases of this namespace + break + } + } + + if len(ls) == 0 { + return nil, ErrReleaseNotFound + } + + return ls, nil +} + +// Create creates a new release or returns ErrReleaseExists. +func (mem *Memory) Create(key string, rls *rspb.Release) error { + defer unlock(mem.wlock()) + + // For backwards compatibility, we protect against an unset namespace + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + mem.SetNamespace(namespace) + + if _, ok := mem.cache[namespace]; !ok { + mem.cache[namespace] = memReleases{} + } + + if recs, ok := mem.cache[namespace][rls.Name]; ok { + if err := recs.Add(newRecord(key, rls)); err != nil { + return err + } + mem.cache[namespace][rls.Name] = recs + return nil + } + mem.cache[namespace][rls.Name] = records{newRecord(key, rls)} + return nil +} + +// Update updates a release or returns ErrReleaseNotFound. +func (mem *Memory) Update(key string, rls *rspb.Release) error { + defer unlock(mem.wlock()) + + // For backwards compatibility, we protect against an unset namespace + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + mem.SetNamespace(namespace) + + if _, ok := mem.cache[namespace]; ok { + if rs, ok := mem.cache[namespace][rls.Name]; ok && rs.Exists(key) { + rs.Replace(key, newRecord(key, rls)) + return nil + } + } + return ErrReleaseNotFound +} + +// Delete deletes a release or returns ErrReleaseNotFound. 
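Because List simply applies the caller's predicate to every cached record, status filtering is an ordinary closure, and the same helper works unchanged against any Driver implementation. A sketch (listDeployed is a hypothetical helper, assuming the driver and rspb imports from the example above):

```go
// listDeployed is a hypothetical helper built only on the Driver interface;
// it keeps the releases whose status is currently "deployed".
func listDeployed(d driver.Driver) ([]*rspb.Release, error) {
	return d.List(func(r *rspb.Release) bool {
		return r.Info != nil && r.Info.Status == rspb.StatusDeployed
	})
}
```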
+func (mem *Memory) Delete(key string) (*rspb.Release, error) { + defer unlock(mem.wlock()) + + keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.") + elems := strings.Split(keyWithoutPrefix, ".v") + + if len(elems) != 2 { + return nil, ErrInvalidKey + } + + name, ver := elems[0], elems[1] + if _, err := strconv.Atoi(ver); err != nil { + return nil, ErrInvalidKey + } + if _, ok := mem.cache[mem.namespace]; ok { + if recs, ok := mem.cache[mem.namespace][name]; ok { + if r := recs.Remove(key); r != nil { + // recs.Remove changes the slice reference, so we have to re-assign it. + mem.cache[mem.namespace][name] = recs + return r.rls, nil + } + } + } + return nil, ErrReleaseNotFound +} + +// wlock locks mem for writing +func (mem *Memory) wlock() func() { + mem.Lock() + return func() { mem.Unlock() } +} + +// rlock locks mem for reading +func (mem *Memory) rlock() func() { + mem.RLock() + return func() { mem.RUnlock() } +} + +// unlock calls fn which reverses a mem.rlock or mem.wlock. e.g: +// ```defer unlock(mem.rlock())```, locks mem for reading at the +// call point of defer and unlocks upon exiting the block. +func unlock(fn func()) { fn() } diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go new file mode 100644 index 000000000000..9df173384c76 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go @@ -0,0 +1,124 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "sort" + "strconv" + + rspb "helm.sh/helm/v3/pkg/release" +) + +// records holds a list of in-memory release records +type records []*record + +func (rs records) Len() int { return len(rs) } +func (rs records) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } +func (rs records) Less(i, j int) bool { return rs[i].rls.Version < rs[j].rls.Version } + +func (rs *records) Add(r *record) error { + if r == nil { + return nil + } + + if rs.Exists(r.key) { + return ErrReleaseExists + } + + *rs = append(*rs, r) + sort.Sort(*rs) + + return nil +} + +func (rs records) Get(key string) *record { + if i, ok := rs.Index(key); ok { + return rs[i] + } + return nil +} + +func (rs *records) Iter(fn func(int, *record) bool) { + cp := make([]*record, len(*rs)) + copy(cp, *rs) + + for i, r := range cp { + if !fn(i, r) { + return + } + } +} + +func (rs *records) Index(key string) (int, bool) { + for i, r := range *rs { + if r.key == key { + return i, true + } + } + return -1, false +} + +func (rs records) Exists(key string) bool { + _, ok := rs.Index(key) + return ok +} + +func (rs *records) Remove(key string) (r *record) { + if i, ok := rs.Index(key); ok { + return rs.removeAt(i) + } + return nil +} + +func (rs *records) Replace(key string, rec *record) *record { + if i, ok := rs.Index(key); ok { + old := (*rs)[i] + (*rs)[i] = rec + return old + } + return nil +} + +func (rs *records) removeAt(index int) *record { + r := (*rs)[index] + (*rs)[index] = nil + copy((*rs)[index:], (*rs)[index+1:]) + *rs = (*rs)[:len(*rs)-1] + return r +} + +// record is the data structure used to cache releases +// for the in-memory storage driver +type record struct { + key string + lbs labels + rls *rspb.Release +} + +// newRecord creates a new in-memory release record +func newRecord(key string, rls *rspb.Release) *record { + var lbs labels + + lbs.init() + lbs.set("name", rls.Name) + lbs.set("owner", "helm") + lbs.set("status", rls.Info.Status.String()) + lbs.set("version", strconv.Itoa(rls.Version)) + + // return &record{key: key, lbs: lbs, rls: proto.Clone(rls).(*rspb.Release)} + return &record{key: key, lbs: lbs, rls: rls} +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go new file mode 100644 index 000000000000..95a7e9032452 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go @@ -0,0 +1,257 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kblabels "k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*Secrets)(nil)
+
+// SecretsDriverName is the string name of the driver.
+const SecretsDriverName = "Secret"
+
+// Secrets is a wrapper around an implementation of a kubernetes
+// SecretsInterface.
+type Secrets struct {
+	impl corev1.SecretInterface
+	Log  func(string, ...interface{})
+}
+
+// NewSecrets initializes a new Secrets wrapping an implementation of
+// the kubernetes SecretsInterface.
+func NewSecrets(impl corev1.SecretInterface) *Secrets {
+	return &Secrets{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (secrets *Secrets) Name() string {
+	return SecretsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned,
+// or an error if it is not found.
+func (secrets *Secrets) Get(key string) (*rspb.Release, error) {
+	// fetch the secret holding the release named by key
+	obj, err := secrets.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+		return nil, errors.Wrapf(err, "get: failed to get %q", key)
+	}
+	// found the secret, decode the base64 data string; check the decode error
+	// before touching the release so a corrupt record cannot cause a nil dereference
+	r, err := decodeRelease(string(obj.Data["release"]))
+	if err != nil {
+		return nil, errors.Wrapf(err, "get: failed to decode data %q", key)
+	}
+	r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
+	return r, nil
+}
+
+// List fetches all releases and returns the list of releases such
+// that filter(release) == true. An error is returned if the
+// secret fails to retrieve the releases.
+func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := secrets.impl.List(context.Background(), opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "list: failed to list")
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the secrets object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(string(item.Data["release"]))
+		if err != nil {
+			secrets.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
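To run this driver against a real cluster, the only dependency is a namespaced SecretInterface from client-go; the labelling and encoding all happen inside Create and Update. A wiring sketch (the helper name, kubeconfig handling, and namespace are assumptions, not part of the vendored code):

```go
package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"helm.sh/helm/v3/pkg/storage/driver"
)

// newSecretsDriver wires the Secrets driver to one namespace of a cluster.
func newSecretsDriver(kubeconfig, namespace string) (*driver.Secrets, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// CoreV1().Secrets(namespace) satisfies the corev1.SecretInterface
	// that NewSecrets wraps.
	return driver.NewSecrets(clientset.CoreV1().Secrets(namespace)), nil
}
```

+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the secret fails to retrieve the releases.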
+func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) { + ls := kblabels.Set{} + for k, v := range labels { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + ls[k] = v + } + + opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()} + + list, err := secrets.impl.List(context.Background(), opts) + if err != nil { + return nil, errors.Wrap(err, "query: failed to query with labels") + } + + if len(list.Items) == 0 { + return nil, ErrReleaseNotFound + } + + var results []*rspb.Release + for _, item := range list.Items { + rls, err := decodeRelease(string(item.Data["release"])) + if err != nil { + secrets.Log("query: failed to decode release: %s", err) + continue + } + rls.Labels = item.ObjectMeta.Labels + results = append(results, rls) + } + return results, nil +} + +// Create creates a new Secret holding the release. If the +// Secret already exists, ErrReleaseExists is returned. +func (secrets *Secrets) Create(key string, rls *rspb.Release) error { + // set labels for secrets object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix())) + + // create a new secret to hold the release + obj, err := newSecretsObject(key, rls, lbs) + if err != nil { + return errors.Wrapf(err, "create: failed to encode release %q", rls.Name) + } + // push the secret object out into the kubiverse + if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil { + if apierrors.IsAlreadyExists(err) { + return ErrReleaseExists + } + + return errors.Wrap(err, "create: failed to create") + } + return nil +} + +// Update updates the Secret holding the release. If not found +// the Secret is created to hold the release. +func (secrets *Secrets) Update(key string, rls *rspb.Release) error { + // set labels for secrets object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix())) + + // create a new secret object to hold the release + obj, err := newSecretsObject(key, rls, lbs) + if err != nil { + return errors.Wrapf(err, "update: failed to encode release %q", rls.Name) + } + // push the secret object out into the kubiverse + _, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) + return errors.Wrap(err, "update: failed to update") +} + +// Delete deletes the Secret holding the release named by key. +func (secrets *Secrets) Delete(key string) (rls *rspb.Release, err error) { + // fetch the release to check existence + if rls, err = secrets.Get(key); err != nil { + return nil, err + } + // delete the release + err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{}) + return rls, err +} + +// newSecretsObject constructs a kubernetes Secret object +// to store a release. Each secret data entry is the base64 +// encoded gzipped string of a release. +// +// The following labels are used within each secret: +// +// "modifiedAt" - timestamp indicating when this secret was last modified. (set in Update) +// "createdAt" - timestamp indicating when this secret was created. (set in Create) +// "version" - version of the release. +// "status" - status of the release (see pkg/release/status.go for variants) +// "owner" - owner of the secret, currently "helm". +// "name" - name of the release. 
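newSecretsObject below assembles exactly that metadata. For a concrete picture, the stored object for a hypothetical release "wordpress" at revision 1 would look roughly like this (a sketch, payload abbreviated, not output of the real function):

```go
// Illustrative shape only; the real Data value is the full encoded release.
secret := &v1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name: "sh.helm.release.v1.wordpress.v1",
		Labels: map[string]string{
			"createdAt": "1700000000",
			"name":      "wordpress",
			"owner":     "helm",
			"status":    "deployed",
			"version":   "1",
		},
	},
	Type: "helm.sh/release.v1",
	// base64-encoded, gzipped JSON of the release (abbreviated).
	Data: map[string][]byte{"release": []byte("H4sIAAAA...")},
}
```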
+func newSecretsObject(key string, rls *rspb.Release, lbs labels) (*v1.Secret, error) {
+	const owner = "helm"
+
+	// encode the release
+	s, err := encodeRelease(rls)
+	if err != nil {
+		return nil, err
+	}
+
+	if lbs == nil {
+		lbs.init()
+	}
+
+	// apply custom labels
+	lbs.fromMap(rls.Labels)
+
+	// apply labels
+	lbs.set("name", rls.Name)
+	lbs.set("owner", owner)
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// create and return secret object.
+	// Helm 3 introduced setting the 'Type' field
+	// in the Kubernetes storage object.
+	// Helm defines the field content as follows:
+	// <helm_domain>/<helm_object>.v<helm_object_version>
+	// Type field for Helm 3: helm.sh/release.v1
+	// Note: Version starts at 'v1' for Helm 3 and
+	// should be incremented if the release object
+	// metadata is modified.
+	// This would potentially be a breaking change
+	// and should only happen between major versions.
+	return &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   key,
+			Labels: lbs.toMap(),
+		},
+		Type: "helm.sh/release.v1",
+		Data: map[string][]byte{"release": []byte(s)},
+	}, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
new file mode 100644
index 000000000000..33bde9b6a494
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
@@ -0,0 +1,697 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/jmoiron/sqlx"
+	migrate "github.com/rubenv/sql-migrate"
+
+	sq "github.com/Masterminds/squirrel"
+
+	// Import pq for postgres dialect
+	_ "github.com/lib/pq"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*SQL)(nil)
+
+var labelMap = map[string]struct{}{
+	"modifiedAt": {},
+	"createdAt":  {},
+	"version":    {},
+	"status":     {},
+	"owner":      {},
+	"name":       {},
+}
+
+const postgreSQLDialect = "postgres"
+
+// SQLDriverName is the string name of this driver.
+const SQLDriverName = "SQL"
+
+const sqlReleaseTableName = "releases_v1"
+const sqlCustomLabelsTableName = "custom_labels_v1"
+
+const (
+	sqlReleaseTableKeyColumn        = "key"
+	sqlReleaseTableTypeColumn       = "type"
+	sqlReleaseTableBodyColumn       = "body"
+	sqlReleaseTableNameColumn       = "name"
+	sqlReleaseTableNamespaceColumn  = "namespace"
+	sqlReleaseTableVersionColumn    = "version"
+	sqlReleaseTableStatusColumn     = "status"
+	sqlReleaseTableOwnerColumn      = "owner"
+	sqlReleaseTableCreatedAtColumn  = "createdAt"
+	sqlReleaseTableModifiedAtColumn = "modifiedAt"
+
+	sqlCustomLabelsTableReleaseKeyColumn       = "releaseKey"
+	sqlCustomLabelsTableReleaseNamespaceColumn = "releaseNamespace"
+	sqlCustomLabelsTableKeyColumn              = "key"
+	sqlCustomLabelsTableValueColumn            = "value"
+)
+
+// Following limits based on k8s labels limits - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
+const (
+	sqlCustomLabelsTableKeyMaxLength   = 253 + 1 + 63
+	sqlCustomLabelsTableValueMaxLength = 63
+)
+
+const (
+	sqlReleaseDefaultOwner = "helm"
+	sqlReleaseDefaultType  = "helm.sh/release.v1"
+)
+
+// SQL is the sql storage driver implementation.
+type SQL struct {
+	db               *sqlx.DB
+	namespace        string
+	statementBuilder sq.StatementBuilderType
+
+	Log func(string, ...interface{})
+}
+
+// Name returns the name of the driver.
+func (s *SQL) Name() string {
+	return SQLDriverName
+}
+
+// checkAlreadyApplied returns true only if every one of the given
+// migrations has already been applied to the database.
+func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
+	// make map (set) of ids for fast search
+	migrationsIDs := make(map[string]struct{})
+	for _, migration := range migrations {
+		migrationsIDs[migration.Id] = struct{}{}
+	}
+
+	// get list of applied migrations
+	migrate.SetDisableCreateTable(true)
+	records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
+	migrate.SetDisableCreateTable(false)
+	if err != nil {
+		s.Log("checkAlreadyApplied: failed to get migration records: %v", err)
+		return false
+	}
+
+	for _, record := range records {
+		if _, ok := migrationsIDs[record.Id]; ok {
+			s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt)
+			delete(migrationsIDs, record.Id)
+		}
+	}
+
+	// check if all migrations applied
+	if len(migrationsIDs) != 0 {
+		for id := range migrationsIDs {
+			s.Log("checkAlreadyApplied: found unapplied migration (id: %v)", id)
+		}
+		return false
+	}
+	return true
+}
+
+func (s *SQL) ensureDBSetup() error {
+
+	migrations := &migrate.MemoryMigrationSource{
+		Migrations: []*migrate.Migration{
+			{
+				Id: "init",
+				Up: []string{
+					fmt.Sprintf(`
+						CREATE TABLE %s (
+							%s VARCHAR(90),
+							%s VARCHAR(64) NOT NULL,
+							%s TEXT NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s INTEGER NOT NULL,
+							%s TEXT NOT NULL,
+							%s TEXT NOT NULL,
+							%s INTEGER NOT NULL,
+							%s INTEGER NOT NULL DEFAULT 0,
+							PRIMARY KEY(%s, %s)
+						);
+						CREATE INDEX ON %s (%s, %s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+
+						GRANT ALL ON %s TO PUBLIC;
+
+						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+					`,
+					sqlReleaseTableName,
+					sqlReleaseTableKeyColumn,
+					sqlReleaseTableTypeColumn,
+					sqlReleaseTableBodyColumn,
+					sqlReleaseTableNameColumn,
+					sqlReleaseTableNamespaceColumn,
+					sqlReleaseTableVersionColumn,
+					sqlReleaseTableStatusColumn,
+					sqlReleaseTableOwnerColumn,
+					sqlReleaseTableCreatedAtColumn,
+					sqlReleaseTableModifiedAtColumn,
+					sqlReleaseTableKeyColumn,
+					sqlReleaseTableNamespaceColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableKeyColumn,
+					sqlReleaseTableNamespaceColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableVersionColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableStatusColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableOwnerColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableCreatedAtColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableModifiedAtColumn,
+					sqlReleaseTableName,
+					sqlReleaseTableName,
+					),
+				},
+				Down: []string{
+					fmt.Sprintf(`
+						DROP TABLE %s;
+					`, sqlReleaseTableName),
+				},
+			},
+			{
+				Id: "custom_labels",
+				Up: []string{
+					fmt.Sprintf(`
+						CREATE TABLE %s (
+							%s VARCHAR(64),
+							%s VARCHAR(67),
+							%s VARCHAR(%d),
+							%s VARCHAR(%d)
+						);
+						CREATE INDEX ON %s (%s, %s);
+
+						GRANT ALL ON %s TO PUBLIC;
+						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+					`,
+					sqlCustomLabelsTableName,
+					sqlCustomLabelsTableReleaseKeyColumn,
+					sqlCustomLabelsTableReleaseNamespaceColumn,
+					sqlCustomLabelsTableKeyColumn,
+					sqlCustomLabelsTableKeyMaxLength,
+					sqlCustomLabelsTableValueColumn,
+					sqlCustomLabelsTableValueMaxLength,
+					sqlCustomLabelsTableName,
+					sqlCustomLabelsTableReleaseKeyColumn,
+					sqlCustomLabelsTableReleaseNamespaceColumn,
+					sqlCustomLabelsTableName,
+					sqlCustomLabelsTableName,
+					),
+				},
+				Down: []string{
+					fmt.Sprintf(`
+						DROP TABLE %s;
+					`, sqlCustomLabelsTableName),
+				},
+			},
+		},
+	}
+
+	// Check that init migration already applied
+	if s.checkAlreadyApplied(migrations.Migrations) {
+		return nil
+	}
+
+	// Populate the database with the relations we need if they don't exist yet
+	_, err := migrate.Exec(s.db.DB, postgreSQLDialect, migrations, migrate.Up)
+	return err
+}
+
+// SQLReleaseWrapper describes how Helm releases are stored in an SQL database
+type SQLReleaseWrapper struct {
+	// The primary key, made of {release-name}.{release-version}
+	Key string `db:"key"`
+
+	// See https://github.com/helm/helm/blob/c9fe3d118caec699eb2565df9838673af379ce12/pkg/storage/driver/secrets.go#L231
+	Type string `db:"type"`
+
+	// The rspb.Release body, as a base64-encoded string
+	Body string `db:"body"`
+
+	// Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
+	// we implemented. Note that allowing Helm users to filter against new dimensions will require a
+	// new migration to be added, and the Create and/or update functions to be updated accordingly.
+	Name       string `db:"name"`
+	Namespace  string `db:"namespace"`
+	Version    int    `db:"version"`
+	Status     string `db:"status"`
+	Owner      string `db:"owner"`
+	CreatedAt  int    `db:"createdAt"`
+	ModifiedAt int    `db:"modifiedAt"`
+}
+
+type SQLReleaseCustomLabelWrapper struct {
+	ReleaseKey       string `db:"release_key"`
+	ReleaseNamespace string `db:"release_namespace"`
+	Key              string `db:"key"`
+	Value            string `db:"value"`
+}
+
+// NewSQL initializes a new sql driver.
+func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
+	db, err := sqlx.Connect(postgreSQLDialect, connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &SQL{
+		db:               db,
+		Log:              logger,
+		statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
+	}
+
+	if err := driver.ensureDBSetup(); err != nil {
+		return nil, err
+	}
+
+	driver.namespace = namespace
+
+	return driver, nil
+}
+
+// Get returns the release named by key.
+func (s *SQL) Get(key string) (*rspb.Release, error) {
+	var record SQLReleaseWrapper
+
+	qb := s.statementBuilder.
+		Select(sqlReleaseTableBodyColumn).
+		From(sqlReleaseTableName).
+		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + + query, args, err := qb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + // Get will return an error if the result is empty + if err := s.db.Get(&record, query, args...); err != nil { + s.Log("got SQL error when getting release %s: %v", key, err) + return nil, ErrReleaseNotFound + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("get: failed to decode data %q: %v", key, err) + return nil, err + } + + if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { + s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) + return nil, err + } + + return release, nil +} + +// List returns the list of all releases such that filter(release) == true +func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableOwnerColumn: sqlReleaseDefaultOwner}) + + // If a namespace was specified, we only list releases from that namespace + if s.namespace != "" { + sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + } + + query, args, err := sb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + var records = []SQLReleaseWrapper{} + if err := s.db.Select(&records, query, args...); err != nil { + s.Log("list: failed to list: %v", err) + return nil, err + } + + var releases []*rspb.Release + for _, record := range records { + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("list: failed to decode release: %v: %v", record, err) + continue + } + + if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { + s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) + return nil, err + } + for k, v := range getReleaseSystemLabels(release) { + release.Labels[k] = v + } + + if filter(release) { + releases = append(releases, release) + } + } + + return releases, nil +} + +// Query returns the set of releases that match the provided set of labels. +func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn). 
+		From(sqlReleaseTableName)
+
+	keys := make([]string, 0, len(labels))
+	for key := range labels {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		if _, ok := labelMap[key]; ok {
+			sb = sb.Where(sq.Eq{key: labels[key]})
+		} else {
+			s.Log("unknown label %s", key)
+			return nil, fmt.Errorf("unknown label %s", key)
+		}
+	}
+
+	// If a namespace was specified, we only list releases from that namespace
+	if s.namespace != "" {
+		sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
+	}
+
+	// Build our query
+	query, args, err := sb.ToSql()
+	if err != nil {
+		s.Log("failed to build query: %v", err)
+		return nil, err
+	}
+
+	var records = []SQLReleaseWrapper{}
+	if err := s.db.Select(&records, query, args...); err != nil {
+		s.Log("list: failed to query with labels: %v", err)
+		return nil, err
+	}
+
+	if len(records) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	var releases []*rspb.Release
+	for _, record := range records {
+		release, err := decodeRelease(record.Body)
+		if err != nil {
+			s.Log("list: failed to decode release: %v: %v", record, err)
+			continue
+		}
+
+		if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
+			s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
+			return nil, err
+		}
+
+		releases = append(releases, release)
+	}
+
+	if len(releases) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	return releases, nil
+}
+
+// Create creates a new release.
+func (s *SQL) Create(key string, rls *rspb.Release) error {
+	namespace := rls.Namespace
+	if namespace == "" {
+		namespace = defaultNamespace
+	}
+	s.namespace = namespace
+
+	body, err := encodeRelease(rls)
+	if err != nil {
+		s.Log("failed to encode release: %v", err)
+		return err
+	}
+
+	transaction, err := s.db.Beginx()
+	if err != nil {
+		s.Log("failed to start SQL transaction: %v", err)
+		return fmt.Errorf("error beginning transaction: %v", err)
+	}
+
+	insertQuery, args, err := s.statementBuilder.
+		Insert(sqlReleaseTableName).
+		Columns(
+			sqlReleaseTableKeyColumn,
+			sqlReleaseTableTypeColumn,
+			sqlReleaseTableBodyColumn,
+			sqlReleaseTableNameColumn,
+			sqlReleaseTableNamespaceColumn,
+			sqlReleaseTableVersionColumn,
+			sqlReleaseTableStatusColumn,
+			sqlReleaseTableOwnerColumn,
+			sqlReleaseTableCreatedAtColumn,
+		).
+		Values(
+			key,
+			sqlReleaseDefaultType,
+			body,
+			rls.Name,
+			namespace,
+			int(rls.Version),
+			rls.Info.Status.String(),
+			sqlReleaseDefaultOwner,
+			int(time.Now().Unix()),
+		).ToSql()
+	if err != nil {
+		s.Log("failed to build insert query: %v", err)
+		return err
+	}
+
+	if _, err := transaction.Exec(insertQuery, args...); err != nil {
+		defer transaction.Rollback()
+
+		selectQuery, args, buildErr := s.statementBuilder.
+			Select(sqlReleaseTableKeyColumn).
+			From(sqlReleaseTableName).
+			Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+			Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+			ToSql()
+		if buildErr != nil {
+			s.Log("failed to build select query: %v", buildErr)
+			return err
+		}
+
+		var record SQLReleaseWrapper
+		if err := transaction.Get(&record, selectQuery, args...); err == nil {
+			s.Log("release %s already exists", key)
+			return ErrReleaseExists
+		}
+
+		s.Log("failed to store release %s in SQL database: %v", key, err)
+		return err
+	}
+
+	// Filter system labels before the insert: in the SQL storage driver,
+	// system labels live in dedicated columns of the release table, so only
+	// custom labels go into the custom labels table
+	for k, v := range filterSystemLabels(rls.Labels) {
+		insertLabelsQuery, args, err := s.statementBuilder.
+ Insert(sqlCustomLabelsTableName). + Columns( + sqlCustomLabelsTableReleaseKeyColumn, + sqlCustomLabelsTableReleaseNamespaceColumn, + sqlCustomLabelsTableKeyColumn, + sqlCustomLabelsTableValueColumn, + ). + Values( + key, + namespace, + k, + v, + ).ToSql() + + if err != nil { + defer transaction.Rollback() + s.Log("failed to build insert query: %v", err) + return err + } + + if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil { + defer transaction.Rollback() + s.Log("failed to write Labels: %v", err) + return err + } + } + defer transaction.Commit() + + return nil +} + +// Update updates a release. +func (s *SQL) Update(key string, rls *rspb.Release) error { + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + s.namespace = namespace + + body, err := encodeRelease(rls) + if err != nil { + s.Log("failed to encode release: %v", err) + return err + } + + query, args, err := s.statementBuilder. + Update(sqlReleaseTableName). + Set(sqlReleaseTableBodyColumn, body). + Set(sqlReleaseTableNameColumn, rls.Name). + Set(sqlReleaseTableVersionColumn, int(rls.Version)). + Set(sqlReleaseTableStatusColumn, rls.Info.Status.String()). + Set(sqlReleaseTableOwnerColumn, sqlReleaseDefaultOwner). + Set(sqlReleaseTableModifiedAtColumn, int(time.Now().Unix())). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: namespace}). + ToSql() + + if err != nil { + s.Log("failed to build update query: %v", err) + return err + } + + if _, err := s.db.Exec(query, args...); err != nil { + s.Log("failed to update release %s in SQL database: %v", key, err) + return err + } + + return nil +} + +// Delete deletes a release or returns ErrReleaseNotFound. +func (s *SQL) Delete(key string) (*rspb.Release, error) { + transaction, err := s.db.Beginx() + if err != nil { + s.Log("failed to start SQL transaction: %v", err) + return nil, fmt.Errorf("error beginning transaction: %v", err) + } + + selectQuery, args, err := s.statementBuilder. + Select(sqlReleaseTableBodyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). + ToSql() + if err != nil { + s.Log("failed to build select query: %v", err) + return nil, err + } + + var record SQLReleaseWrapper + err = transaction.Get(&record, selectQuery, args...) + if err != nil { + s.Log("release %s not found: %v", key, err) + return nil, ErrReleaseNotFound + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("failed to decode release %s: %v", key, err) + transaction.Rollback() + return nil, err + } + defer transaction.Commit() + + deleteQuery, args, err := s.statementBuilder. + Delete(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). + ToSql() + if err != nil { + s.Log("failed to build delete query: %v", err) + return nil, err + } + + _, err = transaction.Exec(deleteQuery, args...) + if err != nil { + s.Log("failed perform delete query: %v", err) + return release, err + } + + if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { + s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) + return nil, err + } + + deleteCustomLabelsQuery, args, err := s.statementBuilder. + Delete(sqlCustomLabelsTableName). + Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key}). + Where(sq.Eq{sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}). 
+ ToSql() + + if err != nil { + s.Log("failed to build delete Labels query: %v", err) + return nil, err + } + _, err = transaction.Exec(deleteCustomLabelsQuery, args...) + return release, err +} + +// Get release custom labels from database +func (s *SQL) getReleaseCustomLabels(key string, _ string) (map[string]string, error) { + query, args, err := s.statementBuilder. + Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn). + From(sqlCustomLabelsTableName). + Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key, + sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}). + ToSql() + if err != nil { + return nil, err + } + + var labelsList = []SQLReleaseCustomLabelWrapper{} + if err := s.db.Select(&labelsList, query, args...); err != nil { + return nil, err + } + + labelsMap := make(map[string]string) + for _, i := range labelsList { + labelsMap[i.Key] = i.Value + } + + return filterSystemLabels(labelsMap), nil +} + +// Rebuild system labels from release object +func getReleaseSystemLabels(rls *rspb.Release) map[string]string { + return map[string]string{ + "name": rls.Name, + "owner": sqlReleaseDefaultOwner, + "status": rls.Info.Status.String(), + "version": strconv.Itoa(rls.Version), + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go new file mode 100644 index 000000000000..7bda5ec966ed --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go @@ -0,0 +1,122 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "io" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var b64 = base64.StdEncoding + +var magicGzip = []byte{0x1f, 0x8b, 0x08} + +var systemLabels = []string{"name", "owner", "status", "version", "createdAt", "modifiedAt"} + +// encodeRelease encodes a release returning a base64 encoded +// gzipped string representation, or error. +func encodeRelease(rls *rspb.Release) (string, error) { + b, err := json.Marshal(rls) + if err != nil { + return "", err + } + var buf bytes.Buffer + w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression) + if err != nil { + return "", err + } + if _, err = w.Write(b); err != nil { + return "", err + } + w.Close() + + return b64.EncodeToString(buf.Bytes()), nil +} + +// decodeRelease decodes the bytes of data into a release +// type. Data must contain a base64 encoded gzipped string of a +// valid release, otherwise an error is returned. 
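encodeRelease above always gzips before base64-encoding, while decodeRelease below sniffs the gzip magic bytes so that records written before compression was introduced still load. A round-trip sketch; it has to live inside package driver because both helpers are unexported (release values hypothetical):

```go
// In-package sketch: encode a minimal release, then decode it back.
rel := &rspb.Release{
	Name:    "wordpress",
	Version: 1,
	Info:    &rspb.Info{Status: rspb.StatusDeployed},
}

s, err := encodeRelease(rel) // JSON -> gzip -> base64
if err != nil {
	panic(err)
}

got, err := decodeRelease(s) // base64 -> gunzip (magic matches) -> JSON
if err != nil {
	panic(err)
}
fmt.Println(got.Name, got.Version) // wordpress 1
```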
+func decodeRelease(data string) (*rspb.Release, error) { + // base64 decode string + b, err := b64.DecodeString(data) + if err != nil { + return nil, err + } + + // For backwards compatibility with releases that were stored before + // compression was introduced we skip decompression if the + // gzip magic header is not found + if len(b) > 3 && bytes.Equal(b[0:3], magicGzip) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, err + } + defer r.Close() + b2, err := io.ReadAll(r) + if err != nil { + return nil, err + } + b = b2 + } + + var rls rspb.Release + // unmarshal release object bytes + if err := json.Unmarshal(b, &rls); err != nil { + return nil, err + } + return &rls, nil +} + +// Checks if label is system +func isSystemLabel(key string) bool { + for _, v := range GetSystemLabels() { + if key == v { + return true + } + } + return false +} + +// Removes system labels from labels map +func filterSystemLabels(lbs map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range lbs { + if !isSystemLabel(k) { + result[k] = v + } + } + return result +} + +// Checks if labels array contains system labels +func ContainsSystemLabels(lbs map[string]string) bool { + for k := range lbs { + if isSystemLabel(k) { + return true + } + } + return false +} + +func GetSystemLabels() []string { + return systemLabels +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/storage.go b/vendor/helm.sh/helm/v3/pkg/storage/storage.go new file mode 100644 index 000000000000..0da0688fd605 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/storage.go @@ -0,0 +1,266 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage // import "helm.sh/helm/v3/pkg/storage" + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + rspb "helm.sh/helm/v3/pkg/release" + relutil "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// HelmStorageType is the type field of the Kubernetes storage object which stores the Helm release +// version. It is modified slightly replacing the '/': sh.helm/release.v1 +// Note: The version 'v1' is incremented if the release object metadata is +// modified between major releases. +// This constant is used as a prefix for the Kubernetes storage object name. +const HelmStorageType = "sh.helm.release.v1" + +// Storage represents a storage engine for a Release. +type Storage struct { + driver.Driver + + // MaxHistory specifies the maximum number of historical releases that will + // be retained, including the most recent release. Values of 0 or less are + // ignored (meaning no limits are imposed). + MaxHistory int + + Log func(string, ...interface{}) +} + +// Get retrieves the release from storage. An error is returned +// if the storage driver failed to fetch the release, or the +// release identified by the key, version pair does not exist. 
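One note on the util.go label helpers defined just above: filterSystemLabels strips the driver-managed keys listed in systemLabels, so only user-supplied labels round-trip through Get and Query. An in-package sketch (label values hypothetical):

```go
// In-package sketch of the system/custom label split.
lbs := map[string]string{
	"name":  "wordpress", // system label, managed by the driver
	"owner": "helm",      // system label
	"team":  "payments",  // custom label, survives filtering
}
fmt.Println(filterSystemLabels(lbs))   // map[team:payments]
fmt.Println(ContainsSystemLabels(lbs)) // true
```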
+func (s *Storage) Get(name string, version int) (*rspb.Release, error) { + s.Log("getting release %q", makeKey(name, version)) + return s.Driver.Get(makeKey(name, version)) +} + +// Create creates a new storage entry holding the release. An +// error is returned if the storage driver fails to store the +// release, or a release with an identical key already exists. +func (s *Storage) Create(rls *rspb.Release) error { + s.Log("creating release %q", makeKey(rls.Name, rls.Version)) + if s.MaxHistory > 0 { + // Want to make space for one more release. + if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil && + !errors.Is(err, driver.ErrReleaseNotFound) { + return err + } + } + return s.Driver.Create(makeKey(rls.Name, rls.Version), rls) +} + +// Update updates the release in storage. An error is returned if the +// storage backend fails to update the release or if the release +// does not exist. +func (s *Storage) Update(rls *rspb.Release) error { + s.Log("updating release %q", makeKey(rls.Name, rls.Version)) + return s.Driver.Update(makeKey(rls.Name, rls.Version), rls) +} + +// Delete deletes the release from storage. An error is returned if +// the storage backend fails to delete the release or if the release +// does not exist. +func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { + s.Log("deleting release %q", makeKey(name, version)) + return s.Driver.Delete(makeKey(name, version)) +} + +// ListReleases returns all releases from storage. An error is returned if the +// storage backend fails to retrieve the releases. +func (s *Storage) ListReleases() ([]*rspb.Release, error) { + s.Log("listing all releases in storage") + return s.Driver.List(func(_ *rspb.Release) bool { return true }) +} + +// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { + s.Log("listing uninstalled releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) + }) +} + +// ListDeployed returns all releases with Status == DEPLOYED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListDeployed() ([]*rspb.Release, error) { + s.Log("listing all deployed releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) + }) +} + +// Deployed returns the last deployed release with the provided release name, or +// returns driver.NewErrNoDeployedReleases if not found. +func (s *Storage) Deployed(name string) (*rspb.Release, error) { + ls, err := s.DeployedAll(name) + if err != nil { + return nil, err + } + + if len(ls) == 0 { + return nil, driver.NewErrNoDeployedReleases(name) + } + + // If executed concurrently, Helm's database gets corrupted + // and multiple releases are DEPLOYED. Take the latest. + relutil.Reverse(ls, relutil.SortByRevision) + + return ls[0], nil +} + +// DeployedAll returns all deployed releases with the provided name, or +// returns driver.NewErrNoDeployedReleases if not found. 
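Putting the pieces together: Storage wraps any Driver, and a positive MaxHistory makes Create prune the oldest revisions (via removeLeastRecent below) before each write. A self-contained sketch against the in-memory driver; names and statuses are illustrative:

```go
package main

import (
	"fmt"

	rspb "helm.sh/helm/v3/pkg/release"
	"helm.sh/helm/v3/pkg/storage"
	"helm.sh/helm/v3/pkg/storage/driver"
)

func main() {
	s := storage.Init(driver.NewMemory())
	s.MaxHistory = 3 // retain at most 3 revisions per release

	for v := 1; v <= 5; v++ {
		rel := &rspb.Release{
			Name:      "wordpress",
			Namespace: "default",
			Version:   v,
			Info:      &rspb.Info{Status: rspb.StatusSuperseded},
		}
		// Create makes space first, so old revisions are pruned as we go.
		if err := s.Create(rel); err != nil {
			panic(err)
		}
	}

	h, _ := s.History("wordpress")
	fmt.Println(len(h)) // 3: revisions 1 and 2 were pruned
}
```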
+func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
+	s.Log("getting deployed releases from %q history", name)
+
+	ls, err := s.Driver.Query(map[string]string{
+		"name":   name,
+		"owner":  "helm",
+		"status": "deployed",
+	})
+	if err == nil {
+		return ls, nil
+	}
+	if strings.Contains(err.Error(), "not found") {
+		return nil, driver.NewErrNoDeployedReleases(name)
+	}
+	return nil, err
+}
+
+// History returns the revision history for the release with the provided name, or
+// returns driver.ErrReleaseNotFound if no such release name exists.
+func (s *Storage) History(name string) ([]*rspb.Release, error) {
+	s.Log("getting release history for %q", name)
+
+	return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
+}
+
+// removeLeastRecent removes items from history until the number of releases
+// does not exceed max.
+//
+// We allow max to be set explicitly so that calling functions can "make space"
+// for the new records they are going to write.
+func (s *Storage) removeLeastRecent(name string, max int) error {
+	if max < 0 {
+		return nil
+	}
+	h, err := s.History(name)
+	if err != nil {
+		return err
+	}
+	if len(h) <= max {
+		return nil
+	}
+
+	// We want oldest to newest
+	relutil.SortByRevision(h)
+
+	lastDeployed, err := s.Deployed(name)
+	if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) {
+		return err
+	}
+
+	var toDelete []*rspb.Release
+	for _, rel := range h {
+		// once we have enough releases to delete to reach the max, stop
+		if len(h)-len(toDelete) == max {
+			break
+		}
+		if lastDeployed != nil {
+			if rel.Version != lastDeployed.Version {
+				toDelete = append(toDelete, rel)
+			}
+		} else {
+			toDelete = append(toDelete, rel)
+		}
+	}
+
+	// Delete as many as possible. In the case of API throughput limitations,
+	// multiple invocations of this function will eventually delete them all.
+	errs := []error{}
+	for _, rel := range toDelete {
+		err = s.deleteReleaseVersion(name, rel.Version)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
+	switch c := len(errs); c {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errors.Errorf("encountered %d deletion errors. First is: %s", c, errs[0])
+	}
+}
+
+func (s *Storage) deleteReleaseVersion(name string, version int) error {
+	key := makeKey(name, version)
+	_, err := s.Delete(name, version)
+	if err != nil {
+		s.Log("error pruning %s from release history: %s", key, err)
+		return err
+	}
+	return nil
+}
+
+// Last fetches the last revision of the named release.
+func (s *Storage) Last(name string) (*rspb.Release, error) {
+	s.Log("getting last revision of %q", name)
+	h, err := s.History(name)
+	if err != nil {
+		return nil, err
+	}
+	if len(h) == 0 {
+		return nil, errors.Errorf("no revision for release %q", name)
+	}
+
+	relutil.Reverse(h, relutil.SortByRevision)
+	return h[0], nil
+}
+
+// makeKey concatenates the Kubernetes storage object type, a release name and version
+// into a string with format: ```<storage_type>.<release_name>.v<release_version>```.
+// The storage type is prepended to keep name uniqueness between different
+// release storage types. An example of clash when not using the type:
+// https://github.com/helm/helm/issues/6435.
+// This key is used to uniquely identify storage objects.
+func makeKey(rlsname string, version int) string {
+	return fmt.Sprintf("%s.%s.v%d", HelmStorageType, rlsname, version)
+}
+
+// Init initializes a new storage backend with the driver d.
+// If d is nil, the default in-memory driver is used. +func Init(d driver.Driver) *Storage { + // default driver is in memory + if d == nil { + d = driver.NewMemory() + } + return &Storage{ + Driver: d, + Log: func(_ string, _ ...interface{}) {}, + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go new file mode 100644 index 000000000000..63a41c0bf41c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go @@ -0,0 +1,29 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package ctime + +import ( + "os" + "time" +) + +func Created(fi os.FileInfo) time.Time { + return modified(fi) +} + +func Modified(fi os.FileInfo) time.Time { + return modified(fi) +} diff --git a/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go new file mode 100644 index 000000000000..d8a6ea1a17aa --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go @@ -0,0 +1,30 @@ +//go:build linux + +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package ctime + +import ( + "os" + "syscall" + "time" +) + +func modified(fi os.FileInfo) time.Time { + st := fi.Sys().(*syscall.Stat_t) + //nolint + return time.Unix(int64(st.Mtim.Sec), int64(st.Mtim.Nsec)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go new file mode 100644 index 000000000000..12afc6df2eb3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go @@ -0,0 +1,27 @@ +//go:build !linux + +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package ctime + +import ( + "os" + "time" +) + +func modified(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go new file mode 100644 index 000000000000..1abe8ae3d86d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/time/time.go @@ -0,0 +1,91 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package time contains a wrapper for time.Time in the standard library and +// associated methods. This package mainly exists to work around an issue in Go +// where the serializer doesn't omit an empty value for time: +// https://github.com/golang/go/issues/11939. As such, this can be removed if a +// proposal is ever accepted for Go +package time + +import ( + "bytes" + "time" +) + +// emptyString contains an empty JSON string value to be used as output +var emptyString = `""` + +// Time is a convenience wrapper around stdlib time, but with different +// marshalling and unmarshaling for zero values +type Time struct { + time.Time +} + +// Now returns the current time. It is a convenience wrapper around time.Now() +func Now() Time { + return Time{time.Now()} +} + +func (t Time) MarshalJSON() ([]byte, error) { + if t.Time.IsZero() { + return []byte(emptyString), nil + } + + return t.Time.MarshalJSON() +} + +func (t *Time) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + return nil + } + // If it is empty, we don't have to set anything since time.Time is not a + // pointer and will be set to the zero value + if bytes.Equal([]byte(emptyString), b) { + return nil + } + + return t.Time.UnmarshalJSON(b) +} + +func Parse(layout, value string) (Time, error) { + t, err := time.Parse(layout, value) + return Time{Time: t}, err +} +func ParseInLocation(layout, value string, loc *time.Location) (Time, error) { + t, err := time.ParseInLocation(layout, value, loc) + return Time{Time: t}, err +} + +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{Time: time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +func Unix(sec int64, nsec int64) Time { return Time{Time: time.Unix(sec, nsec)} } + +func (t Time) Add(d time.Duration) Time { return Time{Time: t.Time.Add(d)} } +func (t Time) AddDate(years int, months int, days int) Time { + return Time{Time: t.Time.AddDate(years, months, days)} +} +func (t Time) After(u Time) bool { return t.Time.After(u.Time) } +func (t Time) Before(u Time) bool { return t.Time.Before(u.Time) } +func (t Time) Equal(u Time) bool { return t.Time.Equal(u.Time) } +func (t Time) In(loc *time.Location) Time { return Time{Time: t.Time.In(loc)} } +func (t Time) Local() Time { return Time{Time: t.Time.Local()} } +func (t Time) Round(d time.Duration) Time { return Time{Time: t.Time.Round(d)} } +func (t Time) Sub(u Time) time.Duration { return t.Time.Sub(u.Time) } +func (t Time) Truncate(d time.Duration) Time { return Time{Time: t.Time.Truncate(d)} } +func (t Time) UTC() Time { return 
Time{Time: t.Time.UTC()} } diff --git a/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go b/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go new file mode 100644 index 000000000000..d7e94040666a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uploader + +import ( + "fmt" + "io" + "net/url" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/pusher" + "helm.sh/helm/v3/pkg/registry" +) + +// ChartUploader handles uploading a chart. +type ChartUploader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Pusher collection for the operation + Pushers pusher.Providers + // Options provide parameters to be passed along to the Pusher being initialized. + Options []pusher.Option + // RegistryClient is a client for interacting with registries. + RegistryClient *registry.Client +} + +// UploadTo uploads a chart. Depending on the settings, it may also upload a provenance file. +func (c *ChartUploader) UploadTo(ref, remote string) error { + u, err := url.Parse(remote) + if err != nil { + return errors.Errorf("invalid chart URL format: %s", remote) + } + + if u.Scheme == "" { + return fmt.Errorf("scheme prefix missing from remote (e.g. \"%s://\")", registry.OCIScheme) + } + + p, err := c.Pushers.ByScheme(u.Scheme) + if err != nil { + return err + } + + return p.Push(ref, u.String(), c.Options...) +} diff --git a/vendor/helm.sh/helm/v3/pkg/uploader/doc.go b/vendor/helm.sh/helm/v3/pkg/uploader/doc.go new file mode 100644 index 000000000000..112ddbf2ce06 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/uploader/doc.go @@ -0,0 +1,21 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package uploader provides a library for uploading charts. + +This package contains tools for uploading charts to registries. +*/ +package uploader diff --git a/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go new file mode 100644 index 000000000000..786c16e27aae --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go @@ -0,0 +1,160 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonmergepatch
+
+import (
+	"fmt"
+	"reflect"
+
+	"gopkg.in/evanphx/json-patch.v4"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/mergepatch"
+)
+
+// CreateThreeWayJSONMergePatch creates a 3-way merge patch based on JSON merge patch.
+// It calculates an addition-and-change patch between current and modified, and
+// a deletion patch between original and modified.
+func CreateThreeWayJSONMergePatch(original, modified, current []byte, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+	if len(original) == 0 {
+		original = []byte(`{}`)
+	}
+	if len(modified) == 0 {
+		modified = []byte(`{}`)
+	}
+	if len(current) == 0 {
+		current = []byte(`{}`)
+	}
+
+	addAndChangePatch, err := jsonpatch.CreateMergePatch(current, modified)
+	if err != nil {
+		return nil, err
+	}
+	// Keep only additions and changes
+	addAndChangePatch, addAndChangePatchObj, err := keepOrDeleteNullInJsonPatch(addAndChangePatch, false)
+	if err != nil {
+		return nil, err
+	}
+
+	deletePatch, err := jsonpatch.CreateMergePatch(original, modified)
+	if err != nil {
+		return nil, err
+	}
+	// Keep only deletions
+	deletePatch, deletePatchObj, err := keepOrDeleteNullInJsonPatch(deletePatch, true)
+	if err != nil {
+		return nil, err
+	}
+
+	hasConflicts, err := mergepatch.HasConflicts(addAndChangePatchObj, deletePatchObj)
+	if err != nil {
+		return nil, err
+	}
+	if hasConflicts {
+		return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(addAndChangePatchObj), mergepatch.ToYAMLOrError(deletePatchObj))
+	}
+	patch, err := jsonpatch.MergePatch(deletePatch, addAndChangePatch)
+	if err != nil {
+		return nil, err
+	}
+
+	var patchMap map[string]interface{}
+	err = json.Unmarshal(patch, &patchMap)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal patch for precondition check: %s", patch)
+	}
+	meetPreconditions, err := meetPreconditions(patchMap, fns...)
+	if err != nil {
+		return nil, err
+	}
+	if !meetPreconditions {
+		return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+	}
+
+	return patch, nil
+}
+
+// keepOrDeleteNullInJsonPatch takes a json-encoded byte array and a boolean.
+// It returns a filtered object and its corresponding json-encoded byte array.
+// It is a wrapper around keepOrDeleteNullInObj.
+func keepOrDeleteNullInJsonPatch(patch []byte, keepNull bool) ([]byte, map[string]interface{}, error) {
+	var patchMap map[string]interface{}
+	err := json.Unmarshal(patch, &patchMap)
+	if err != nil {
+		return nil, nil, err
+	}
+	filteredMap, err := keepOrDeleteNullInObj(patchMap, keepNull)
+	if err != nil {
+		return nil, nil, err
+	}
+	o, err := json.Marshal(filteredMap)
+	return o, filteredMap, err
+}
+
+// keepOrDeleteNullInObj keeps only the null values and deletes all the others
+// if keepNull is true. Otherwise, it deletes all the null values and keeps the others.
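+// For illustration (a sketch of the behavior, with JSON-style literals):
+//
+//	keepNull=false: {"a": null, "b": 1} -> {"b": 1}
+//	keepNull=true:  {"a": null, "b": 1} -> {"a": null}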
+func keepOrDeleteNullInObj(m map[string]interface{}, keepNull bool) (map[string]interface{}, error) {
+	filteredMap := make(map[string]interface{})
+	var err error
+	for key, val := range m {
+		switch {
+		case keepNull && val == nil:
+			filteredMap[key] = nil
+		case val != nil:
+			switch typedVal := val.(type) {
+			case map[string]interface{}:
+				// Explicitly-set empty maps are treated as values instead of empty patches
+				if len(typedVal) == 0 {
+					if !keepNull {
+						filteredMap[key] = typedVal
+					}
+					continue
+				}
+
+				var filteredSubMap map[string]interface{}
+				filteredSubMap, err = keepOrDeleteNullInObj(typedVal, keepNull)
+				if err != nil {
+					return nil, err
+				}
+
+				// If the returned filtered sub-map was empty, this is an empty patch for
+				// the entire sub-map, so the key should not be set
+				if len(filteredSubMap) != 0 {
+					filteredMap[key] = filteredSubMap
+				}
+
+			case []interface{}, string, float64, bool, int64, nil:
+				// Lists are always replaced in JSON; no need to check each entry in the list.
+				if !keepNull {
+					filteredMap[key] = val
+				}
+			default:
+				return nil, fmt.Errorf("unknown type: %v", reflect.TypeOf(typedVal))
+			}
+		}
+	}
+	return filteredMap, nil
+}
+
+func meetPreconditions(patchObj map[string]interface{}, fns ...mergepatch.PreconditionFunc) (bool, error) {
+	// Apply the preconditions to the patch, and return an error if any of them fail.
+	for _, fn := range fns {
+		if !fn(patchObj) {
+			return false, fmt.Errorf("precondition failed for: %v", patchObj)
+		}
+	}
+	return true, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c47c2dd7e38a..9c36b2f398b4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -46,11 +46,12 @@ cloud.google.com/go/storage/experimental
 cloud.google.com/go/storage/internal
 cloud.google.com/go/storage/internal/apiv2
 cloud.google.com/go/storage/internal/apiv2/storagepb
+# dario.cat/mergo v1.0.1
+## explicit; go 1.13
+dario.cat/mergo
 # git.sr.ht/~sbinet/gg v0.5.0
 ## explicit; go 1.19
 git.sr.ht/~sbinet/gg
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
-## explicit; go 1.20
 # github.com/Azure/azure-pipeline-go v0.2.3
 ## explicit; go 1.14
 github.com/Azure/azure-pipeline-go/pipeline
@@ -150,7 +151,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage
 # github.com/Azure/azure-storage-blob-go v0.15.0
 ## explicit; go 1.15
 github.com/Azure/azure-storage-blob-go/azblob
-# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
+# github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
@@ -214,6 +215,10 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
+# github.com/BurntSushi/toml v1.5.0
+## explicit; go 1.18
+github.com/BurntSushi/toml
+github.com/BurntSushi/toml/internal
 # github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0
 ## explicit; go 1.23.0
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp
@@ -310,9 +315,21 @@
github.com/JeffAshton/win_pdh # github.com/MakeNowJust/heredoc v1.0.0 ## explicit; go 1.12 github.com/MakeNowJust/heredoc +# github.com/Masterminds/goutils v1.1.1 +## explicit +github.com/Masterminds/goutils # github.com/Masterminds/semver v1.5.0 ## explicit github.com/Masterminds/semver +# github.com/Masterminds/semver/v3 v3.3.0 +## explicit; go 1.21 +github.com/Masterminds/semver/v3 +# github.com/Masterminds/sprig/v3 v3.3.0 +## explicit; go 1.21 +github.com/Masterminds/sprig/v3 +# github.com/Masterminds/squirrel v1.5.4 +## explicit; go 1.14 +github.com/Masterminds/squirrel # github.com/Microsoft/go-winio v0.6.2 ## explicit; go 1.21 github.com/Microsoft/go-winio @@ -484,6 +501,16 @@ github.com/cncf/xds/go/xds/type/v3 # github.com/container-storage-interface/spec v1.9.0 ## explicit; go 1.18 github.com/container-storage-interface/spec/lib/go/csi +# github.com/containerd/containerd v1.7.27 +## explicit; go 1.21 +github.com/containerd/containerd/archive/compression +github.com/containerd/containerd/content +github.com/containerd/containerd/errdefs +github.com/containerd/containerd/filters +github.com/containerd/containerd/images +github.com/containerd/containerd/labels +github.com/containerd/containerd/pkg/randutil +github.com/containerd/containerd/remotes # github.com/containerd/containerd/api v1.8.0 ## explicit; go 1.21 github.com/containerd/containerd/api/services/containers/v1 @@ -502,7 +529,10 @@ github.com/containerd/errdefs/pkg/internal/types # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log -# github.com/containerd/ttrpc v1.2.6 +# github.com/containerd/platforms v0.2.1 +## explicit; go 1.20 +github.com/containerd/platforms +# github.com/containerd/ttrpc v1.2.7 ## explicit; go 1.19 github.com/containerd/ttrpc # github.com/containerd/typeurl/v2 v2.2.2 @@ -532,7 +562,7 @@ github.com/davecgh/go-spew/spew # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1 +# github.com/distribution/distribution/v3 v3.0.0 => github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1 ## explicit; go 1.18 github.com/distribution/distribution/v3 github.com/distribution/distribution/v3/manifest @@ -645,6 +675,9 @@ github.com/envoyproxy/protoc-gen-validate/validate # github.com/euank/go-kmsg-parser v2.0.0+incompatible ## explicit github.com/euank/go-kmsg-parser/kmsgparser +# github.com/evanphx/json-patch v5.9.11+incompatible +## explicit +github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.9.11 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 @@ -655,6 +688,9 @@ github.com/exponent-io/jsonpath # github.com/fatih/camelcase v1.0.0 ## explicit github.com/fatih/camelcase +# 
github.com/fatih/color v1.18.0 +## explicit; go 1.17 +github.com/fatih/color # github.com/felixge/fgprof v0.9.4 ## explicit; go 1.14 github.com/felixge/fgprof @@ -713,6 +749,9 @@ github.com/go-fonts/liberation/liberationserifbold github.com/go-fonts/liberation/liberationserifbolditalic github.com/go-fonts/liberation/liberationserifitalic github.com/go-fonts/liberation/liberationserifregular +# github.com/go-gorp/gorp/v3 v3.1.0 +## explicit; go 1.18 +github.com/go-gorp/gorp/v3 # github.com/go-jose/go-jose/v4 v4.1.1 ## explicit; go 1.23.0 github.com/go-jose/go-jose/v4 @@ -801,6 +840,16 @@ github.com/go-playground/validator/v10 # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 github.com/go-task/slim-sprig/v3 +# github.com/gobwas/glob v0.2.3 +## explicit +github.com/gobwas/glob +github.com/gobwas/glob/compiler +github.com/gobwas/glob/match +github.com/gobwas/glob/syntax +github.com/gobwas/glob/syntax/ast +github.com/gobwas/glob/syntax/lexer +github.com/gobwas/glob/util/runes +github.com/gobwas/glob/util/strings # github.com/gocarina/gocsv v0.0.0-20231116093920-b87c2d0e983a ## explicit; go 1.13 github.com/gocarina/gocsv @@ -995,6 +1044,11 @@ github.com/gophercloud/gophercloud/pagination # github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 ## explicit; go 1.20 github.com/gorilla/websocket +# github.com/gosuri/uitable v0.0.4 +## explicit +github.com/gosuri/uitable +github.com/gosuri/uitable/util/strutil +github.com/gosuri/uitable/util/wordwrap # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 ## explicit github.com/gregjones/httpcache @@ -1019,12 +1073,18 @@ github.com/h2non/gock # github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 ## explicit github.com/h2non/parth +# github.com/hashicorp/errwrap v1.1.0 +## explicit +github.com/hashicorp/errwrap # github.com/hashicorp/go-checkpoint v0.5.0 ## explicit github.com/hashicorp/go-checkpoint # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 +github.com/hashicorp/go-multierror # github.com/hashicorp/go-retryablehttp v0.7.7 ## explicit; go 1.19 github.com/hashicorp/go-retryablehttp @@ -1064,6 +1124,9 @@ github.com/hashicorp/terraform-exec/tfexec # github.com/hashicorp/terraform-json v0.25.0 ## explicit; go 1.18 github.com/hashicorp/terraform-json +# github.com/huandu/xstrings v1.5.0 +## explicit; go 1.12 +github.com/huandu/xstrings # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -1073,6 +1136,10 @@ github.com/jbenet/go-context/io # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath +# 
github.com/jmoiron/sqlx v1.4.0 +## explicit; go 1.10 +github.com/jmoiron/sqlx +github.com/jmoiron/sqlx/reflectx # github.com/jonboulle/clockwork v0.5.0 ## explicit; go 1.21 github.com/jonboulle/clockwork @@ -1109,6 +1176,12 @@ github.com/klauspost/compress/zstd/internal/xxhash ## explicit; go 1.11 github.com/kylelemons/godebug/diff github.com/kylelemons/godebug/pretty +# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 +## explicit +github.com/lann/builder +# github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 +## explicit +github.com/lann/ps # github.com/leodido/go-urn v1.4.0 ## explicit; go 1.18 github.com/leodido/go-urn @@ -1133,6 +1206,11 @@ github.com/lestrrat-go/structinfo # github.com/lestrrat/go-jsschema v0.0.0-20181205002244-5c81c58ffcc3 ## explicit github.com/lestrrat/go-jsschema +# github.com/lib/pq v1.10.9 +## explicit; go 1.13 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram # github.com/libopenstorage/openstorage v1.0.0 ## explicit github.com/libopenstorage/openstorage/api @@ -1165,6 +1243,9 @@ github.com/mattn/go-ieproxy # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty +# github.com/mattn/go-runewidth v0.0.9 +## explicit; go 1.9 +github.com/mattn/go-runewidth # github.com/mattn/go-sqlite3 v1.14.30 ## explicit; go 1.19 github.com/mattn/go-sqlite3 @@ -1289,6 +1370,9 @@ github.com/microsoftgraph/msgraph-sdk-go-core/authentication # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible ## explicit github.com/mistifyio/go-zfs +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 +github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir @@ -1298,6 +1382,9 @@ github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.2 +## explicit +github.com/mitchellh/reflectwalk # github.com/moby/docker-image-spec v1.3.1 ## explicit; go 1.18 github.com/moby/docker-image-spec/specs-go/v1 @@ -1320,7 +1407,7 @@ github.com/moby/sys/user # github.com/moby/sys/userns v0.1.0 ## explicit; go 1.21 github.com/moby/sys/userns -# github.com/moby/term v0.5.0 +# github.com/moby/term v0.5.2 ## explicit; go 1.18 github.com/moby/term github.com/moby/term/windows @@ -1844,12 +1931,19 @@ github.com/robfig/cron/v3 github.com/rs/zerolog github.com/rs/zerolog/internal/cbor github.com/rs/zerolog/internal/json +# github.com/rubenv/sql-migrate v1.8.0 +## explicit; go 1.23.0 +github.com/rubenv/sql-migrate +github.com/rubenv/sql-migrate/sqlparse # github.com/russross/blackfriday/v2 v2.1.0 ## explicit 
github.com/russross/blackfriday/v2 # github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 ## explicit; go 1.13 github.com/sergi/go-diff/diffmatchpatch +# github.com/shopspring/decimal v1.4.0 +## explicit; go 1.10 +github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -1863,8 +1957,8 @@ github.com/soheilhy/cmux github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cast v1.5.1 -## explicit; go 1.18 +# github.com/spf13/cast v1.7.0 +## explicit; go 1.19 github.com/spf13/cast # github.com/spf13/cobra v1.9.1 ## explicit; go 1.15 @@ -1968,6 +2062,13 @@ github.com/x448/float16 github.com/xanzy/ssh-agent # github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb ## explicit +github.com/xeipuuv/gojsonpointer +# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +## explicit +github.com/xeipuuv/gojsonreference +# github.com/xeipuuv/gojsonschema v1.2.0 +## explicit +github.com/xeipuuv/gojsonschema # github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 ## explicit github.com/xiang90/probing @@ -2236,6 +2337,7 @@ golang.org/x/crypto/md4 golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/openpgp golang.org/x/crypto/openpgp/armor +golang.org/x/crypto/openpgp/clearsign golang.org/x/crypto/openpgp/elgamal golang.org/x/crypto/openpgp/errors golang.org/x/crypto/openpgp/packet @@ -2244,6 +2346,7 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent @@ -2702,6 +2805,45 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 +# helm.sh/helm/v3 v3.18.0 +## explicit; go 1.24.0 +helm.sh/helm/v3/internal/fileutil +helm.sh/helm/v3/internal/resolver +helm.sh/helm/v3/internal/sympath +helm.sh/helm/v3/internal/third_party/dep/fs +helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util +helm.sh/helm/v3/internal/tlsutil +helm.sh/helm/v3/internal/urlutil +helm.sh/helm/v3/internal/version +helm.sh/helm/v3/pkg/action +helm.sh/helm/v3/pkg/chart +helm.sh/helm/v3/pkg/chart/loader +helm.sh/helm/v3/pkg/chartutil +helm.sh/helm/v3/pkg/cli +helm.sh/helm/v3/pkg/downloader +helm.sh/helm/v3/pkg/engine +helm.sh/helm/v3/pkg/getter +helm.sh/helm/v3/pkg/helmpath +helm.sh/helm/v3/pkg/helmpath/xdg +helm.sh/helm/v3/pkg/ignore +helm.sh/helm/v3/pkg/kube +helm.sh/helm/v3/pkg/kube/fake +helm.sh/helm/v3/pkg/lint +helm.sh/helm/v3/pkg/lint/rules +helm.sh/helm/v3/pkg/lint/support +helm.sh/helm/v3/pkg/plugin +helm.sh/helm/v3/pkg/postrender +helm.sh/helm/v3/pkg/provenance +helm.sh/helm/v3/pkg/pusher +helm.sh/helm/v3/pkg/registry +helm.sh/helm/v3/pkg/release +helm.sh/helm/v3/pkg/releaseutil +helm.sh/helm/v3/pkg/repo +helm.sh/helm/v3/pkg/storage +helm.sh/helm/v3/pkg/storage/driver +helm.sh/helm/v3/pkg/time +helm.sh/helm/v3/pkg/time/ctime +helm.sh/helm/v3/pkg/uploader # k8s.io/api v0.34.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20251017123720-96593f323733 ## explicit; go 1.24.0 
k8s.io/api/admission/v1 @@ -2868,6 +3010,7 @@ k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/util/httpstream/wsstream k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/jsonmergepatch k8s.io/apimachinery/pkg/util/managedfields k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch @@ -4617,6 +4760,39 @@ k8s.io/utils/ptr k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace +# oras.land/oras-go/v2 v2.5.0 +## explicit; go 1.21 +oras.land/oras-go/v2 +oras.land/oras-go/v2/content +oras.land/oras-go/v2/content/memory +oras.land/oras-go/v2/errdef +oras.land/oras-go/v2/internal/cas +oras.land/oras-go/v2/internal/container/set +oras.land/oras-go/v2/internal/copyutil +oras.land/oras-go/v2/internal/descriptor +oras.land/oras-go/v2/internal/docker +oras.land/oras-go/v2/internal/graph +oras.land/oras-go/v2/internal/httputil +oras.land/oras-go/v2/internal/interfaces +oras.land/oras-go/v2/internal/ioutil +oras.land/oras-go/v2/internal/manifestutil +oras.land/oras-go/v2/internal/platform +oras.land/oras-go/v2/internal/registryutil +oras.land/oras-go/v2/internal/resolver +oras.land/oras-go/v2/internal/spec +oras.land/oras-go/v2/internal/status +oras.land/oras-go/v2/internal/syncutil +oras.land/oras-go/v2/registry +oras.land/oras-go/v2/registry/remote +oras.land/oras-go/v2/registry/remote/auth +oras.land/oras-go/v2/registry/remote/credentials +oras.land/oras-go/v2/registry/remote/credentials/internal/config +oras.land/oras-go/v2/registry/remote/credentials/internal/executer +oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil +oras.land/oras-go/v2/registry/remote/credentials/trace +oras.land/oras-go/v2/registry/remote/errcode +oras.land/oras-go/v2/registry/remote/internal/errutil +oras.land/oras-go/v2/registry/remote/retry # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 ## explicit; go 1.21 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client @@ -4895,7 +5071,9 @@ sigs.k8s.io/structured-merge-diff/v6/value ## explicit; go 1.22 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 +sigs.k8s.io/yaml/goyaml.v3 sigs.k8s.io/yaml/kyaml +# github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20230530204932-ba46c769b3d1 # github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 # k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20251017123720-96593f323733 diff --git a/vendor/oras.land/oras-go/v2/.gitignore b/vendor/oras.land/oras-go/v2/.gitignore new file mode 100644 index 000000000000..400a0ea0cbd6 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/.gitignore @@ -0,0 +1,41 @@ +# Copyright The ORAS Authors. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# VS Code +.vscode +debug + +# Jetbrains +.idea + +# Custom +coverage.txt +bin/ +dist/ +*.tar.gz +vendor/ +_dist/ +.cover diff --git a/vendor/oras.land/oras-go/v2/CODEOWNERS b/vendor/oras.land/oras-go/v2/CODEOWNERS new file mode 100644 index 000000000000..45a68a31c208 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/CODEOWNERS @@ -0,0 +1,2 @@ +# Derived from OWNERS.md +* @sajayantony @shizhMSFT @stevelasker @Wwwsylvia diff --git a/vendor/oras.land/oras-go/v2/CODE_OF_CONDUCT.md b/vendor/oras.land/oras-go/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..49579ab8001c --- /dev/null +++ b/vendor/oras.land/oras-go/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Code of Conduct + +OCI Registry As Storage (ORAS) follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/oras.land/oras-go/v2/LICENSE b/vendor/oras.land/oras-go/v2/LICENSE new file mode 100644 index 000000000000..a67d1693894d --- /dev/null +++ b/vendor/oras.land/oras-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 ORAS Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/oras.land/oras-go/v2/MIGRATION_GUIDE.md b/vendor/oras.land/oras-go/v2/MIGRATION_GUIDE.md
new file mode 100644
index 000000000000..b9292f143609
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/MIGRATION_GUIDE.md
@@ -0,0 +1,45 @@
+# Migration Guide
+
+In version `v2`, the ORAS Go library has been completely refreshed with:
+
+- More unified interfaces
+- Notably fewer dependencies
+- Higher test coverage
+- Better documentation
+
+**In addition, ORAS Go `v2` is now a registry client.**
+
+## Major Changes in `v2`
+
+- Moves `content.FileStore` to [file.Store](https://pkg.go.dev/oras.land/oras-go/v2/content/file#Store)
+- Moves `content.OCIStore` to [oci.Store](https://pkg.go.dev/oras.land/oras-go/v2/content/oci#Store)
+- Moves `content.MemoryStore` to [memory.Store](https://pkg.go.dev/oras.land/oras-go/v2/content/memory#Store)
+- Provides [SDK](https://pkg.go.dev/oras.land/oras-go/v2/registry/remote) to interact with OCI-compliant and Docker-compliant registries
+- Supports [Copy](https://pkg.go.dev/oras.land/oras-go/v2#Copy) with more flexible options
+- Supports [Extended Copy](https://pkg.go.dev/oras.land/oras-go/v2#ExtendedCopy) with options *(experimental)*
+- No longer supports `docker.Login` and `docker.Logout` (removes the dependency on `docker`); instead, provides authentication through [auth.Client](https://pkg.go.dev/oras.land/oras-go/v2/registry/remote/auth#Client)
+
+Documentation and examples are available at [pkg.go.dev](https://pkg.go.dev/oras.land/oras-go/v2).
+
+## Migrating from `v1` to `v2`
+
+1. Get the `v2` package
+
+   ```sh
+   go get oras.land/oras-go/v2
+   ```
+
+2. Import and use the `v2` package
+
+   ```go
+   import "oras.land/oras-go/v2"
+   ```
+
+3. Run
+
+   ```sh
+   go mod tidy
+   ```
+
+Since breaking changes are introduced in `v2`, code refactoring is required for migrating from `v1` to `v2`.
+The migration can be done in an iterative fashion, as `v1` and `v2` can be imported and used at the same time.
diff --git a/vendor/oras.land/oras-go/v2/Makefile b/vendor/oras.land/oras-go/v2/Makefile
new file mode 100644
index 000000000000..bc671e440502
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/Makefile
@@ -0,0 +1,38 @@
+# Copyright The ORAS Authors.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: test
+test: vendor check-encoding
+	go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
+
+.PHONY: covhtml
+covhtml:
+	open .cover/coverage.html
+
+.PHONY: clean
+clean:
+	git status --ignored --short | grep '^!! ' | sed 's/!! //' | xargs rm -rf
+
+.PHONY: check-encoding
+check-encoding:
+	! find . -not -path "./vendor/*" -name "*.go" -type f -exec file "{}" ";" | grep CRLF
+	! find scripts -name "*.sh" -type f -exec file "{}" ";" | grep CRLF
+
+.PHONY: fix-encoding
+fix-encoding:
+	find . -not -path "./vendor/*" -name "*.go" -type f -exec sed -i -e "s/\r//g" {} +
+	find scripts -name "*.sh" -type f -exec sed -i -e "s/\r//g" {} +
+
+.PHONY: vendor
+vendor:
+	go mod vendor
diff --git a/vendor/oras.land/oras-go/v2/OWNERS.md b/vendor/oras.land/oras-go/v2/OWNERS.md
new file mode 100644
index 000000000000..402c4a97dffd
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/OWNERS.md
@@ -0,0 +1,11 @@
+# Owners
+
+Owners:
+  - Sajay Antony (@sajayantony)
+  - Shiwei Zhang (@shizhMSFT)
+  - Steve Lasker (@stevelasker)
+  - Sylvia Lei (@Wwwsylvia)
+
+Emeritus:
+  - Avi Deitcher (@deitch)
+  - Josh Dolitsky (@jdolitsky)
diff --git a/vendor/oras.land/oras-go/v2/README.md b/vendor/oras.land/oras-go/v2/README.md
new file mode 100644
index 000000000000..7c3013c7a2b5
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/README.md
@@ -0,0 +1,58 @@
+# ORAS Go library
+

+<!-- banner image -->

+
+## Project status
+
+### Versioning
+
+The ORAS Go library follows [Semantic Versioning](https://semver.org/), where breaking changes are reserved for MAJOR releases, and MINOR and PATCH releases must be 100% backwards compatible.
+
+### v2: stable
+
+[![Build Status](https://github.com/oras-project/oras-go/actions/workflows/build.yml/badge.svg?event=push&branch=main)](https://github.com/oras-project/oras-go/actions/workflows/build.yml?query=workflow%3Abuild+event%3Apush+branch%3Amain)
+[![codecov](https://codecov.io/gh/oras-project/oras-go/branch/main/graph/badge.svg)](https://codecov.io/gh/oras-project/oras-go)
+[![Go Report Card](https://goreportcard.com/badge/oras.land/oras-go/v2)](https://goreportcard.com/report/oras.land/oras-go/v2)
+[![Go Reference](https://pkg.go.dev/badge/oras.land/oras-go/v2.svg)](https://pkg.go.dev/oras.land/oras-go/v2)
+
+Version `2` is actively developed in the [`main`](https://github.com/oras-project/oras-go/tree/main) branch with all new features.
+
+> [!Note]
+> The `main` branch follows [Go's Security Policy](https://github.com/golang/go/security/policy) and supports the two latest versions of Go (currently `1.21` and `1.22`).
+
+Examples for common use cases can be found below:
+
+- [Copy examples](https://pkg.go.dev/oras.land/oras-go/v2#pkg-examples)
+- [Registry interaction examples](https://pkg.go.dev/oras.land/oras-go/v2/registry#pkg-examples)
+- [Repository interaction examples](https://pkg.go.dev/oras.land/oras-go/v2/registry/remote#pkg-examples)
+- [Authentication examples](https://pkg.go.dev/oras.land/oras-go/v2/registry/remote/auth#pkg-examples)
+
+If you are seeking the latest changes, you should use the [`main`](https://github.com/oras-project/oras-go/tree/main) branch (or a specific commit hash) over a tagged version when including the ORAS Go library in your project's `go.mod`.
+The Go Reference for the `main` branch is available [here](https://pkg.go.dev/oras.land/oras-go/v2@main).
+
+To migrate from `v1` to `v2`, see [MIGRATION_GUIDE.md](./MIGRATION_GUIDE.md).
+
+### v1: stable
+
+[![Build Status](https://github.com/oras-project/oras-go/actions/workflows/build.yml/badge.svg?event=push&branch=v1)](https://github.com/oras-project/oras-go/actions/workflows/build.yml?query=workflow%3Abuild+event%3Apush+branch%3Av1)
+[![Go Report Card](https://goreportcard.com/badge/oras.land/oras-go)](https://goreportcard.com/report/oras.land/oras-go)
+[![Go Reference](https://pkg.go.dev/badge/oras.land/oras-go.svg)](https://pkg.go.dev/oras.land/oras-go)
+
+As there are various stable projects depending on the ORAS Go library `v1`, the
+[`v1`](https://github.com/oras-project/oras-go/tree/v1) branch
+is maintained for API stability, dependency updates, and security patches.
+All `v1.*` releases are based upon this branch.
+
+Since `v1` is in a maintenance state, you are highly encouraged
+to use releases with major version `2` for new features.
+
+## Docs
+
+- [oras.land/client_libraries/go](https://oras.land/docs/Client_Libraries/go): Documentation for the ORAS Go library
+- [Reviewing guide](https://github.com/oras-project/community/blob/main/REVIEWING.md): All reviewers must read the reviewing guide and agree to follow the project review guidelines.
+
+## Code of Conduct
+
+This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
See [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for further details. diff --git a/vendor/oras.land/oras-go/v2/SECURITY.md b/vendor/oras.land/oras-go/v2/SECURITY.md new file mode 100644 index 000000000000..ffefe3417fc2 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +Please follow the [security policy](https://oras.land/docs/community/reporting_security_concerns) to report a security vulnerability or concern. diff --git a/vendor/oras.land/oras-go/v2/content.go b/vendor/oras.land/oras-go/v2/content.go new file mode 100644 index 000000000000..b8bf26386f3c --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content.go @@ -0,0 +1,411 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oras + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/cas" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/interfaces" + "oras.land/oras-go/v2/internal/platform" + "oras.land/oras-go/v2/internal/syncutil" + "oras.land/oras-go/v2/registry" + "oras.land/oras-go/v2/registry/remote/auth" +) + +const ( + // defaultTagConcurrency is the default concurrency of tagging. + defaultTagConcurrency int = 5 // This value is consistent with dockerd + + // defaultTagNMaxMetadataBytes is the default value of + // TagNOptions.MaxMetadataBytes. + defaultTagNMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB + + // defaultResolveMaxMetadataBytes is the default value of + // ResolveOptions.MaxMetadataBytes. + defaultResolveMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB + + // defaultMaxBytes is the default value of FetchBytesOptions.MaxBytes. + defaultMaxBytes int64 = 4 * 1024 * 1024 // 4 MiB +) + +// DefaultTagNOptions provides the default TagNOptions. +var DefaultTagNOptions TagNOptions + +// TagNOptions contains parameters for [oras.TagN]. +type TagNOptions struct { + // Concurrency limits the maximum number of concurrent tag tasks. + // If less than or equal to 0, a default (currently 5) is used. + Concurrency int + + // MaxMetadataBytes limits the maximum size of metadata that can be cached + // in the memory. + // If less than or equal to 0, a default (currently 4 MiB) is used. + MaxMetadataBytes int64 +} + +// TagN tags the descriptor identified by srcReference with dstReferences. 
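+//
+// Illustrative usage sketch (assumes a ctx and an initialized Target `repo`,
+// e.g. a *remote.Repository; names are examples only):
+//
+//	desc, err := oras.TagN(ctx, repo, "v1.0.0",
+//		[]string{"latest", "stable"}, oras.DefaultTagNOptions)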
+func TagN(ctx context.Context, target Target, srcReference string, dstReferences []string, opts TagNOptions) (ocispec.Descriptor, error) { + switch len(dstReferences) { + case 0: + return ocispec.Descriptor{}, fmt.Errorf("dstReferences cannot be empty: %w", errdef.ErrMissingReference) + case 1: + return Tag(ctx, target, srcReference, dstReferences[0]) + } + + if opts.Concurrency <= 0 { + opts.Concurrency = defaultTagConcurrency + } + if opts.MaxMetadataBytes <= 0 { + opts.MaxMetadataBytes = defaultTagNMaxMetadataBytes + } + + _, isRefFetcher := target.(registry.ReferenceFetcher) + _, isRefPusher := target.(registry.ReferencePusher) + if isRefFetcher && isRefPusher { + if repo, ok := target.(interfaces.ReferenceParser); ok { + // add scope hints to minimize the number of auth requests + ref, err := repo.ParseReference(srcReference) + if err != nil { + return ocispec.Descriptor{}, err + } + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull, auth.ActionPush) + } + + desc, contentBytes, err := FetchBytes(ctx, target, srcReference, FetchBytesOptions{ + MaxBytes: opts.MaxMetadataBytes, + }) + if err != nil { + if errors.Is(err, errdef.ErrSizeExceedsLimit) { + err = fmt.Errorf( + "content size %v exceeds MaxMetadataBytes %v: %w", + desc.Size, + opts.MaxMetadataBytes, + errdef.ErrSizeExceedsLimit) + } + return ocispec.Descriptor{}, err + } + + if err := tagBytesN(ctx, target, desc, contentBytes, dstReferences, TagBytesNOptions{ + Concurrency: opts.Concurrency, + }); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil + } + + desc, err := target.Resolve(ctx, srcReference) + if err != nil { + return ocispec.Descriptor{}, err + } + eg, egCtx := syncutil.LimitGroup(ctx, opts.Concurrency) + for _, dstRef := range dstReferences { + eg.Go(func(dst string) func() error { + return func() error { + if err := target.Tag(egCtx, desc, dst); err != nil { + return fmt.Errorf("failed to tag %s as %s: %w", srcReference, dst, err) + } + return nil + } + }(dstRef)) + } + + if err := eg.Wait(); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil +} + +// Tag tags the descriptor identified by src with dst. +func Tag(ctx context.Context, target Target, src, dst string) (ocispec.Descriptor, error) { + refFetcher, okFetch := target.(registry.ReferenceFetcher) + refPusher, okPush := target.(registry.ReferencePusher) + if okFetch && okPush { + if repo, ok := target.(interfaces.ReferenceParser); ok { + // add scope hints to minimize the number of auth requests + ref, err := repo.ParseReference(src) + if err != nil { + return ocispec.Descriptor{}, err + } + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull, auth.ActionPush) + } + desc, rc, err := refFetcher.FetchReference(ctx, src) + if err != nil { + return ocispec.Descriptor{}, err + } + defer rc.Close() + if err := refPusher.PushReference(ctx, desc, rc, dst); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil + } + + desc, err := target.Resolve(ctx, src) + if err != nil { + return ocispec.Descriptor{}, err + } + if err := target.Tag(ctx, desc, dst); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil +} + +// DefaultResolveOptions provides the default ResolveOptions. +var DefaultResolveOptions ResolveOptions + +// ResolveOptions contains parameters for [oras.Resolve]. 
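+//
+// Illustrative usage sketch (assumes an initialized ReadOnlyTarget `repo`;
+// the platform values are examples only):
+//
+//	desc, err := oras.Resolve(ctx, repo, "v1.0.0", oras.ResolveOptions{
+//		TargetPlatform: &ocispec.Platform{OS: "linux", Architecture: "amd64"},
+//	})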
+type ResolveOptions struct { + // TargetPlatform ensures the resolved content matches the target platform + // if the node is a manifest, or selects the first resolved content that + // matches the target platform if the node is a manifest list. + TargetPlatform *ocispec.Platform + + // MaxMetadataBytes limits the maximum size of metadata that can be cached + // in the memory. + // If less than or equal to 0, a default (currently 4 MiB) is used. + MaxMetadataBytes int64 +} + +// Resolve resolves a descriptor with provided reference from the target. +func Resolve(ctx context.Context, target ReadOnlyTarget, reference string, opts ResolveOptions) (ocispec.Descriptor, error) { + if opts.TargetPlatform == nil { + return target.Resolve(ctx, reference) + } + return resolve(ctx, target, nil, reference, opts) +} + +// resolve resolves a descriptor with provided reference from the target, with +// specified caching. +func resolve(ctx context.Context, target ReadOnlyTarget, proxy *cas.Proxy, reference string, opts ResolveOptions) (ocispec.Descriptor, error) { + if opts.MaxMetadataBytes <= 0 { + opts.MaxMetadataBytes = defaultResolveMaxMetadataBytes + } + + if refFetcher, ok := target.(registry.ReferenceFetcher); ok { + // optimize performance for ReferenceFetcher targets + desc, rc, err := refFetcher.FetchReference(ctx, reference) + if err != nil { + return ocispec.Descriptor{}, err + } + defer rc.Close() + + switch desc.MediaType { + case docker.MediaTypeManifestList, ocispec.MediaTypeImageIndex, + docker.MediaTypeManifest, ocispec.MediaTypeImageManifest: + // cache the fetched content + if desc.Size > opts.MaxMetadataBytes { + return ocispec.Descriptor{}, fmt.Errorf( + "content size %v exceeds MaxMetadataBytes %v: %w", + desc.Size, + opts.MaxMetadataBytes, + errdef.ErrSizeExceedsLimit) + } + if proxy == nil { + proxy = cas.NewProxyWithLimit(target, cas.NewMemory(), opts.MaxMetadataBytes) + } + if err := proxy.Cache.Push(ctx, desc, rc); err != nil { + return ocispec.Descriptor{}, err + } + // stop caching as SelectManifest may fetch a config blob + proxy.StopCaching = true + return platform.SelectManifest(ctx, proxy, desc, opts.TargetPlatform) + default: + return ocispec.Descriptor{}, fmt.Errorf("%s: %s: %w", desc.Digest, desc.MediaType, errdef.ErrUnsupported) + } + } + + desc, err := target.Resolve(ctx, reference) + if err != nil { + return ocispec.Descriptor{}, err + } + return platform.SelectManifest(ctx, target, desc, opts.TargetPlatform) +} + +// DefaultFetchOptions provides the default FetchOptions. +var DefaultFetchOptions FetchOptions + +// FetchOptions contains parameters for [oras.Fetch]. +type FetchOptions struct { + // ResolveOptions contains parameters for resolving reference. + ResolveOptions +} + +// Fetch fetches the content identified by the reference. 
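+//
+// Illustrative usage sketch (assumes an initialized ReadOnlyTarget `repo`;
+// the returned io.ReadCloser must be closed by the caller):
+//
+//	desc, rc, err := oras.Fetch(ctx, repo, "v1.0.0", oras.DefaultFetchOptions)
+//	if err == nil {
+//		defer rc.Close()
+//	}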
+func Fetch(ctx context.Context, target ReadOnlyTarget, reference string, opts FetchOptions) (ocispec.Descriptor, io.ReadCloser, error) {
+	if opts.TargetPlatform == nil {
+		if refFetcher, ok := target.(registry.ReferenceFetcher); ok {
+			return refFetcher.FetchReference(ctx, reference)
+		}
+
+		desc, err := target.Resolve(ctx, reference)
+		if err != nil {
+			return ocispec.Descriptor{}, nil, err
+		}
+		rc, err := target.Fetch(ctx, desc)
+		if err != nil {
+			return ocispec.Descriptor{}, nil, err
+		}
+		return desc, rc, nil
+	}
+
+	if opts.MaxMetadataBytes <= 0 {
+		opts.MaxMetadataBytes = defaultResolveMaxMetadataBytes
+	}
+	proxy := cas.NewProxyWithLimit(target, cas.NewMemory(), opts.MaxMetadataBytes)
+	desc, err := resolve(ctx, target, proxy, reference, opts.ResolveOptions)
+	if err != nil {
+		return ocispec.Descriptor{}, nil, err
+	}
+	// if the content exists in cache, fetch it from cache
+	// otherwise fetch without caching
+	proxy.StopCaching = true
+	rc, err := proxy.Fetch(ctx, desc)
+	if err != nil {
+		return ocispec.Descriptor{}, nil, err
+	}
+	return desc, rc, nil
+}
+
+// DefaultFetchBytesOptions provides the default FetchBytesOptions.
+var DefaultFetchBytesOptions FetchBytesOptions
+
+// FetchBytesOptions contains parameters for [oras.FetchBytes].
+type FetchBytesOptions struct {
+	// FetchOptions contains parameters for fetching content.
+	FetchOptions
+	// MaxBytes limits the maximum size of the fetched content bytes.
+	// If less than or equal to 0, a default (currently 4 MiB) is used.
+	MaxBytes int64
+}
+
+// FetchBytes fetches the content bytes identified by the reference.
+func FetchBytes(ctx context.Context, target ReadOnlyTarget, reference string, opts FetchBytesOptions) (ocispec.Descriptor, []byte, error) {
+	if opts.MaxBytes <= 0 {
+		opts.MaxBytes = defaultMaxBytes
+	}
+
+	desc, rc, err := Fetch(ctx, target, reference, opts.FetchOptions)
+	if err != nil {
+		return ocispec.Descriptor{}, nil, err
+	}
+	defer rc.Close()
+
+	if desc.Size > opts.MaxBytes {
+		return ocispec.Descriptor{}, nil, fmt.Errorf(
+			"content size %v exceeds MaxBytes %v: %w",
+			desc.Size,
+			opts.MaxBytes,
+			errdef.ErrSizeExceedsLimit)
+	}
+	bytes, err := content.ReadAll(rc, desc)
+	if err != nil {
+		return ocispec.Descriptor{}, nil, err
+	}
+
+	return desc, bytes, nil
+}
+
+// PushBytes describes the contentBytes using the given mediaType and pushes it.
+// If mediaType is not specified, "application/octet-stream" is used.
+func PushBytes(ctx context.Context, pusher content.Pusher, mediaType string, contentBytes []byte) (ocispec.Descriptor, error) {
+	desc := content.NewDescriptorFromBytes(mediaType, contentBytes)
+	r := bytes.NewReader(contentBytes)
+	if err := pusher.Push(ctx, desc, r); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	return desc, nil
+}
+
+// DefaultTagBytesNOptions provides the default TagBytesNOptions.
+var DefaultTagBytesNOptions TagBytesNOptions
+
+// TagBytesNOptions contains parameters for [oras.TagBytesN].
+type TagBytesNOptions struct {
+	// Concurrency limits the maximum number of concurrent tag tasks.
+	// If less than or equal to 0, a default (currently 5) is used.
+	Concurrency int
+}
+
+// TagBytesN describes the contentBytes using the given mediaType, pushes it,
+// and tags it with the given references.
+// If mediaType is not specified, "application/octet-stream" is used.
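+//
+// Illustrative usage sketch (assumes `manifestBytes` holds serialized manifest
+// content and `repo` is an initialized Target; names are examples only):
+//
+//	desc, err := oras.TagBytesN(ctx, repo, ocispec.MediaTypeImageManifest,
+//		manifestBytes, []string{"latest", "v1"}, oras.DefaultTagBytesNOptions)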
+func TagBytesN(ctx context.Context, target Target, mediaType string, contentBytes []byte, references []string, opts TagBytesNOptions) (ocispec.Descriptor, error) {
+	if len(references) == 0 {
+		return PushBytes(ctx, target, mediaType, contentBytes)
+	}
+
+	desc := content.NewDescriptorFromBytes(mediaType, contentBytes)
+	if opts.Concurrency <= 0 {
+		opts.Concurrency = defaultTagConcurrency
+	}
+
+	if err := tagBytesN(ctx, target, desc, contentBytes, references, opts); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return desc, nil
+}
+
+// tagBytesN pushes the contentBytes using the given desc, and tags it with the
+// given references.
+func tagBytesN(ctx context.Context, target Target, desc ocispec.Descriptor, contentBytes []byte, references []string, opts TagBytesNOptions) error {
+	eg, egCtx := syncutil.LimitGroup(ctx, opts.Concurrency)
+	if refPusher, ok := target.(registry.ReferencePusher); ok {
+		for _, reference := range references {
+			eg.Go(func(ref string) func() error {
+				return func() error {
+					r := bytes.NewReader(contentBytes)
+					if err := refPusher.PushReference(egCtx, desc, r, ref); err != nil && !errors.Is(err, errdef.ErrAlreadyExists) {
+						return fmt.Errorf("failed to tag %s: %w", ref, err)
+					}
+					return nil
+				}
+			}(reference))
+		}
+	} else {
+		r := bytes.NewReader(contentBytes)
+		if err := target.Push(ctx, desc, r); err != nil && !errors.Is(err, errdef.ErrAlreadyExists) {
+			return fmt.Errorf("failed to push content: %w", err)
+		}
+		for _, reference := range references {
+			eg.Go(func(ref string) func() error {
+				return func() error {
+					if err := target.Tag(egCtx, desc, ref); err != nil {
+						return fmt.Errorf("failed to tag %s: %w", ref, err)
+					}
+					return nil
+				}
+			}(reference))
+		}
+	}
+
+	return eg.Wait()
+}
+
+// TagBytes describes the contentBytes using the given mediaType, pushes it,
+// and tags it with the given reference.
+// If mediaType is not specified, "application/octet-stream" is used.
+func TagBytes(ctx context.Context, target Target, mediaType string, contentBytes []byte, reference string) (ocispec.Descriptor, error) {
+	return TagBytesN(ctx, target, mediaType, contentBytes, []string{reference}, DefaultTagBytesNOptions)
+}
diff --git a/vendor/oras.land/oras-go/v2/content/descriptor.go b/vendor/oras.land/oras-go/v2/content/descriptor.go
new file mode 100644
index 000000000000..8e6c25dec209
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/content/descriptor.go
@@ -0,0 +1,40 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"oras.land/oras-go/v2/internal/descriptor"
+)
+
+// NewDescriptorFromBytes returns a descriptor, given the content and media type.
+// If no media type is specified, "application/octet-stream" will be used.
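+//
+// Illustrative sketch: the returned descriptor records the media type, the
+// SHA-256 digest of the bytes, and their length:
+//
+//	desc := content.NewDescriptorFromBytes(ocispec.MediaTypeImageManifest, data)
+//	// desc.Digest == digest.FromBytes(data), desc.Size == int64(len(data))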
+func NewDescriptorFromBytes(mediaType string, content []byte) ocispec.Descriptor { + if mediaType == "" { + mediaType = descriptor.DefaultMediaType + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest.FromBytes(content), + Size: int64(len(content)), + } +} + +// Equal returns true if two descriptors point to the same content. +func Equal(a, b ocispec.Descriptor) bool { + return a.Size == b.Size && a.Digest == b.Digest && a.MediaType == b.MediaType +} diff --git a/vendor/oras.land/oras-go/v2/content/graph.go b/vendor/oras.land/oras-go/v2/content/graph.go new file mode 100644 index 000000000000..9ae837285e26 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content/graph.go @@ -0,0 +1,122 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "encoding/json" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/spec" +) + +// PredecessorFinder finds out the nodes directly pointing to a given node of a +// directed acyclic graph. +// In other words, returns the "parents" of the current descriptor. +// PredecessorFinder is an extension of Storage. +type PredecessorFinder interface { + // Predecessors returns the nodes directly pointing to the current node. + Predecessors(ctx context.Context, node ocispec.Descriptor) ([]ocispec.Descriptor, error) +} + +// GraphStorage represents a CAS that supports direct predecessor node finding. +type GraphStorage interface { + Storage + PredecessorFinder +} + +// ReadOnlyGraphStorage represents a read-only GraphStorage. +type ReadOnlyGraphStorage interface { + ReadOnlyStorage + PredecessorFinder +} + +// Successors returns the nodes directly pointed by the current node. +// In other words, returns the "children" of the current descriptor. 
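+//
+// A minimal sketch (assuming fetcher is any content.Fetcher that holds the
+// manifest described by node):
+//
+//	children, err := content.Successors(ctx, fetcher, node)
+//	if err != nil {
+//		return err
+//	}
+//	for _, child := range children {
+//		fmt.Println(child.MediaType, child.Digest)
+//	}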
+func Successors(ctx context.Context, fetcher Fetcher, node ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch node.MediaType { + case docker.MediaTypeManifest: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + // OCI manifest schema can be used to marshal docker manifest + var manifest ocispec.Manifest + if err := json.Unmarshal(content, &manifest); err != nil { + return nil, err + } + return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil + case ocispec.MediaTypeImageManifest: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + var manifest ocispec.Manifest + if err := json.Unmarshal(content, &manifest); err != nil { + return nil, err + } + var nodes []ocispec.Descriptor + if manifest.Subject != nil { + nodes = append(nodes, *manifest.Subject) + } + nodes = append(nodes, manifest.Config) + return append(nodes, manifest.Layers...), nil + case docker.MediaTypeManifestList: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + + // OCI manifest index schema can be used to marshal docker manifest list + var index ocispec.Index + if err := json.Unmarshal(content, &index); err != nil { + return nil, err + } + return index.Manifests, nil + case ocispec.MediaTypeImageIndex: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(content, &index); err != nil { + return nil, err + } + var nodes []ocispec.Descriptor + if index.Subject != nil { + nodes = append(nodes, *index.Subject) + } + return append(nodes, index.Manifests...), nil + case spec.MediaTypeArtifactManifest: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + + var manifest spec.Artifact + if err := json.Unmarshal(content, &manifest); err != nil { + return nil, err + } + var nodes []ocispec.Descriptor + if manifest.Subject != nil { + nodes = append(nodes, *manifest.Subject) + } + return append(nodes, manifest.Blobs...), nil + } + return nil, nil +} diff --git a/vendor/oras.land/oras-go/v2/content/limitedstorage.go b/vendor/oras.land/oras-go/v2/content/limitedstorage.go new file mode 100644 index 000000000000..9a6df2f82ffb --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content/limitedstorage.go @@ -0,0 +1,50 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "fmt" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/errdef" +) + +// LimitedStorage represents a CAS with a push size limit. +type LimitedStorage struct { + Storage // underlying storage + PushLimit int64 // max size for push +} + +// Push pushes the content, matching the expected descriptor. +// The size of the content cannot exceed the push size limit. 
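+//
+// A minimal sketch (assuming a 4 MiB limit over the in-memory store from
+// oras.land/oras-go/v2/content/memory):
+//
+//	limited := content.LimitStorage(memory.New(), 4*1024*1024)
+//	// Push fails with errdef.ErrSizeExceedsLimit when desc.Size > 4 MiB.
+//	err := limited.Push(ctx, desc, r)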
+func (ls *LimitedStorage) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { + if expected.Size > ls.PushLimit { + return fmt.Errorf( + "content size %v exceeds push size limit %v: %w", + expected.Size, + ls.PushLimit, + errdef.ErrSizeExceedsLimit) + } + + return ls.Storage.Push(ctx, expected, io.LimitReader(content, expected.Size)) +} + +// LimitStorage returns a storage with a push size limit. +func LimitStorage(s Storage, n int64) *LimitedStorage { + return &LimitedStorage{s, n} +} diff --git a/vendor/oras.land/oras-go/v2/content/memory/memory.go b/vendor/oras.land/oras-go/v2/content/memory/memory.go new file mode 100644 index 000000000000..f4e6c1d963cf --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content/memory/memory.go @@ -0,0 +1,96 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package memory provides implementation of a memory backed content store. +package memory + +import ( + "context" + "fmt" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/cas" + "oras.land/oras-go/v2/internal/graph" + "oras.land/oras-go/v2/internal/resolver" +) + +// Store represents a memory based store, which implements `oras.Target`. +type Store struct { + storage content.Storage + resolver content.TagResolver + graph *graph.Memory +} + +// New creates a new memory based store. +func New() *Store { + return &Store{ + storage: cas.NewMemory(), + resolver: resolver.NewMemory(), + graph: graph.NewMemory(), + } +} + +// Fetch fetches the content identified by the descriptor. +func (s *Store) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + return s.storage.Fetch(ctx, target) +} + +// Push pushes the content, matching the expected descriptor. +func (s *Store) Push(ctx context.Context, expected ocispec.Descriptor, reader io.Reader) error { + if err := s.storage.Push(ctx, expected, reader); err != nil { + return err + } + + // index predecessors. + // there is no data consistency issue as long as deletion is not implemented + // for the memory store. + return s.graph.Index(ctx, s.storage, expected) +} + +// Exists returns true if the described content exists. +func (s *Store) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) { + return s.storage.Exists(ctx, target) +} + +// Resolve resolves a reference to a descriptor. +func (s *Store) Resolve(ctx context.Context, reference string) (ocispec.Descriptor, error) { + return s.resolver.Resolve(ctx, reference) +} + +// Tag tags a descriptor with a reference string. +// Returns ErrNotFound if the tagged content does not exist. 
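+//
+// A minimal push-then-tag sketch (the media type and tag are illustrative):
+//
+//	s := memory.New()
+//	blob := []byte("config")
+//	desc := content.NewDescriptorFromBytes("application/vnd.example.config+json", blob)
+//	if err := s.Push(ctx, desc, bytes.NewReader(blob)); err != nil {
+//		return err
+//	}
+//	if err := s.Tag(ctx, desc, "v1"); err != nil {
+//		return err
+//	}
+//	resolved, err := s.Resolve(ctx, "v1") // resolved.Digest == desc.Digest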
+func (s *Store) Tag(ctx context.Context, desc ocispec.Descriptor, reference string) error {
+	exists, err := s.storage.Exists(ctx, desc)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		return fmt.Errorf("%s: %s: %w", desc.Digest, desc.MediaType, errdef.ErrNotFound)
+	}
+	return s.resolver.Tag(ctx, desc, reference)
+}
+
+// Predecessors returns the nodes directly pointing to the current node.
+// Predecessors returns nil without error if the node does not exist in the
+// store.
+// Like other operations, calling Predecessors() is goroutine-safe. However,
+// it does not necessarily correspond to any consistent snapshot of the stored
+// contents.
+func (s *Store) Predecessors(ctx context.Context, node ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	return s.graph.Predecessors(ctx, node)
+}
diff --git a/vendor/oras.land/oras-go/v2/content/reader.go b/vendor/oras.land/oras-go/v2/content/reader.go
new file mode 100644
index 000000000000..e575378e313d
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/content/reader.go
@@ -0,0 +1,144 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	// ErrInvalidDescriptorSize is returned by ReadAll() when
+	// the descriptor has an invalid size.
+	ErrInvalidDescriptorSize = errors.New("invalid descriptor size")
+
+	// ErrMismatchedDigest is returned by ReadAll() when
+	// the descriptor has an invalid digest.
+	ErrMismatchedDigest = errors.New("mismatched digest")
+
+	// ErrTrailingData is returned by ReadAll() when
+	// there is unread trailing data when the read terminates.
+	ErrTrailingData = errors.New("trailing data")
+)
+
+var (
+	// errEarlyVerify is returned by VerifyReader.Verify() when
+	// Verify() is called before the entire content blob has been read.
+	errEarlyVerify = errors.New("early verify")
+)
+
+// VerifyReader reads the content described by its descriptor and verifies
+// against its size and digest.
+type VerifyReader struct {
+	base     *io.LimitedReader
+	verifier digest.Verifier
+	verified bool
+	err      error
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
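+//
+// A minimal round-trip sketch of VerifyReader (the blob is illustrative):
+//
+//	blob := []byte("hello")
+//	desc := content.NewDescriptorFromBytes("application/octet-stream", blob)
+//	vr := content.NewVerifyReader(bytes.NewReader(blob), desc)
+//	buf, err := io.ReadAll(vr)
+//	if err != nil {
+//		return err
+//	}
+//	if err := vr.Verify(); err != nil {
+//		return err // size or digest mismatch
+//	}
+//	_ = buf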
+func (vr *VerifyReader) Read(p []byte) (n int, err error) { + if vr.err != nil { + return 0, vr.err + } + + n, err = vr.base.Read(p) + if err != nil { + if err == io.EOF && vr.base.N > 0 { + err = io.ErrUnexpectedEOF + } + vr.err = err + } + return +} + +// Verify checks for remaining unread content and verifies the read content against the digest +func (vr *VerifyReader) Verify() error { + if vr.verified { + return nil + } + if vr.err == nil { + if vr.base.N > 0 { + return errEarlyVerify + } + } else if vr.err != io.EOF { + return vr.err + } + + if err := ensureEOF(vr.base.R); err != nil { + vr.err = err + return vr.err + } + if !vr.verifier.Verified() { + vr.err = ErrMismatchedDigest + return vr.err + } + + vr.verified = true + vr.err = io.EOF + return nil +} + +// NewVerifyReader wraps r for reading content with verification against desc. +func NewVerifyReader(r io.Reader, desc ocispec.Descriptor) *VerifyReader { + verifier := desc.Digest.Verifier() + lr := &io.LimitedReader{ + R: io.TeeReader(r, verifier), + N: desc.Size, + } + return &VerifyReader{ + base: lr, + verifier: verifier, + } +} + +// ReadAll safely reads the content described by the descriptor. +// The read content is verified against the size and the digest +// using a VerifyReader. +func ReadAll(r io.Reader, desc ocispec.Descriptor) ([]byte, error) { + if desc.Size < 0 { + return nil, ErrInvalidDescriptorSize + } + buf := make([]byte, desc.Size) + + vr := NewVerifyReader(r, desc) + if n, err := io.ReadFull(vr, buf); err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + return nil, fmt.Errorf("read failed: expected content size of %d, got %d, for digest %s: %w", desc.Size, n, desc.Digest.String(), err) + } + return nil, fmt.Errorf("read failed: %w", err) + } + if err := vr.Verify(); err != nil { + return nil, err + } + return buf, nil +} + +// ensureEOF ensures the read operation ends with an EOF and no +// trailing data is present. +func ensureEOF(r io.Reader) error { + var peek [1]byte + _, err := io.ReadFull(r, peek[:]) + if err != io.EOF { + return ErrTrailingData + } + return nil +} diff --git a/vendor/oras.land/oras-go/v2/content/resolver.go b/vendor/oras.land/oras-go/v2/content/resolver.go new file mode 100644 index 000000000000..bc0fd8df9ce6 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content/resolver.go @@ -0,0 +1,47 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package content provides implementations to access content stores. +package content + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Resolver resolves reference tags. +type Resolver interface { + // Resolve resolves a reference to a descriptor. + Resolve(ctx context.Context, reference string) (ocispec.Descriptor, error) +} + +// Tagger tags reference tags. +type Tagger interface { + // Tag tags a descriptor with a reference string. 
+ Tag(ctx context.Context, desc ocispec.Descriptor, reference string) error +} + +// TagResolver provides reference tag indexing services. +type TagResolver interface { + Tagger + Resolver +} + +// Untagger untags reference tags. +type Untagger interface { + // Untag untags the given reference string. + Untag(ctx context.Context, reference string) error +} diff --git a/vendor/oras.land/oras-go/v2/content/storage.go b/vendor/oras.land/oras-go/v2/content/storage.go new file mode 100644 index 000000000000..47c95d876937 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/content/storage.go @@ -0,0 +1,80 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Fetcher fetches content. +type Fetcher interface { + // Fetch fetches the content identified by the descriptor. + Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) +} + +// Pusher pushes content. +type Pusher interface { + // Push pushes the content, matching the expected descriptor. + // Reader is preferred to Writer so that the suitable buffer size can be + // chosen by the underlying implementation. Furthermore, the implementation + // can also do reflection on the Reader for more advanced I/O optimization. + Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error +} + +// Storage represents a content-addressable storage (CAS) where contents are +// accessed via Descriptors. +// The storage is designed to handle blobs of large sizes. +type Storage interface { + ReadOnlyStorage + Pusher +} + +// ReadOnlyStorage represents a read-only Storage. +type ReadOnlyStorage interface { + Fetcher + + // Exists returns true if the described content exists. + Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) +} + +// Deleter removes content. +// Deleter is an extension of Storage. +type Deleter interface { + // Delete removes the content identified by the descriptor. + Delete(ctx context.Context, target ocispec.Descriptor) error +} + +// FetchAll safely fetches the content described by the descriptor. +// The fetched content is verified against the size and the digest. +func FetchAll(ctx context.Context, fetcher Fetcher, desc ocispec.Descriptor) ([]byte, error) { + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return nil, err + } + defer rc.Close() + return ReadAll(rc, desc) +} + +// FetcherFunc is the basic Fetch method defined in Fetcher. +type FetcherFunc func(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch performs Fetch operation by the FetcherFunc. 
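+//
+// A minimal adapter sketch (the blobs map, desc, and blob are illustrative
+// and assumed to be in scope):
+//
+//	blobs := map[digest.Digest][]byte{desc.Digest: blob}
+//	fetcher := content.FetcherFunc(func(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) {
+//		b, ok := blobs[target.Digest]
+//		if !ok {
+//			return nil, errdef.ErrNotFound
+//		}
+//		return io.NopCloser(bytes.NewReader(b)), nil
+//	})
+//	data, err := content.FetchAll(ctx, fetcher, desc)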
+func (fn FetcherFunc) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) {
+	return fn(ctx, target)
+}
diff --git a/vendor/oras.land/oras-go/v2/copy.go b/vendor/oras.land/oras-go/v2/copy.go
new file mode 100644
index 000000000000..2f131a8c6a9a
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/copy.go
@@ -0,0 +1,516 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oras
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/semaphore"
+	"oras.land/oras-go/v2/content"
+	"oras.land/oras-go/v2/errdef"
+	"oras.land/oras-go/v2/internal/cas"
+	"oras.land/oras-go/v2/internal/descriptor"
+	"oras.land/oras-go/v2/internal/platform"
+	"oras.land/oras-go/v2/internal/registryutil"
+	"oras.land/oras-go/v2/internal/status"
+	"oras.land/oras-go/v2/internal/syncutil"
+	"oras.land/oras-go/v2/registry"
+)
+
+// defaultConcurrency is the default value of CopyGraphOptions.Concurrency.
+const defaultConcurrency int = 3 // This value is consistent with dockerd and containerd.
+
+// SkipNode signals to stop copying a node. When returned from PreCopy, the blob must exist in the target.
+// This can be used to signal that a blob has been made available in the target repository by "Mount()" or some other technique.
+var SkipNode = errors.New("skip node")
+
+// DefaultCopyOptions provides the default CopyOptions.
+var DefaultCopyOptions CopyOptions = CopyOptions{
+	CopyGraphOptions: DefaultCopyGraphOptions,
+}
+
+// CopyOptions contains parameters for [oras.Copy].
+type CopyOptions struct {
+	CopyGraphOptions
+	// MapRoot maps the resolved root node to a desired root node for copy.
+	// When MapRoot is provided, the descriptor resolved from the source
+	// reference will be passed to MapRoot, and the mapped descriptor will be
+	// used as the root node for copy.
+	MapRoot func(ctx context.Context, src content.ReadOnlyStorage, root ocispec.Descriptor) (ocispec.Descriptor, error)
+}
+
+// WithTargetPlatform configures opts.MapRoot to select the manifest whose
+// platform matches the given platform. When MapRoot is provided, the platform
+// selection will be applied to the mapped root node.
+// - If the given platform is nil, no platform selection will be applied.
+// - If the root node is a manifest, it will remain the same if the platform
+// matches, otherwise ErrNotFound will be returned.
+// - If the root node is a manifest list, it will be mapped to the first
+// matching manifest if one exists, otherwise ErrNotFound will be returned.
+// - Otherwise ErrUnsupported will be returned.
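+//
+// A minimal sketch (src and dst are any oras targets in scope; the tag and
+// platform are illustrative):
+//
+//	opts := oras.DefaultCopyOptions
+//	opts.WithTargetPlatform(&ocispec.Platform{OS: "linux", Architecture: "amd64"})
+//	desc, err := oras.Copy(ctx, src, "v1", dst, "v1", opts)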
+func (opts *CopyOptions) WithTargetPlatform(p *ocispec.Platform) { + if p == nil { + return + } + mapRoot := opts.MapRoot + opts.MapRoot = func(ctx context.Context, src content.ReadOnlyStorage, root ocispec.Descriptor) (desc ocispec.Descriptor, err error) { + if mapRoot != nil { + if root, err = mapRoot(ctx, src, root); err != nil { + return ocispec.Descriptor{}, err + } + } + return platform.SelectManifest(ctx, src, root, p) + } +} + +// defaultCopyMaxMetadataBytes is the default value of +// CopyGraphOptions.MaxMetadataBytes. +const defaultCopyMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB + +// DefaultCopyGraphOptions provides the default CopyGraphOptions. +var DefaultCopyGraphOptions CopyGraphOptions + +// CopyGraphOptions contains parameters for [oras.CopyGraph]. +type CopyGraphOptions struct { + // Concurrency limits the maximum number of concurrent copy tasks. + // If less than or equal to 0, a default (currently 3) is used. + Concurrency int + // MaxMetadataBytes limits the maximum size of the metadata that can be + // cached in the memory. + // If less than or equal to 0, a default (currently 4 MiB) is used. + MaxMetadataBytes int64 + // PreCopy handles the current descriptor before it is copied. PreCopy can + // return a SkipNode to signal that desc should be skipped when it already + // exists in the target. + PreCopy func(ctx context.Context, desc ocispec.Descriptor) error + // PostCopy handles the current descriptor after it is copied. + PostCopy func(ctx context.Context, desc ocispec.Descriptor) error + // OnCopySkipped will be called when the sub-DAG rooted by the current node + // is skipped. + OnCopySkipped func(ctx context.Context, desc ocispec.Descriptor) error + // MountFrom returns the candidate repositories that desc may be mounted from. + // The OCI references will be tried in turn. If mounting fails on all of them, + // then it falls back to a copy. + MountFrom func(ctx context.Context, desc ocispec.Descriptor) ([]string, error) + // OnMounted will be invoked when desc is mounted. + OnMounted func(ctx context.Context, desc ocispec.Descriptor) error + // FindSuccessors finds the successors of the current node. + // fetcher provides cached access to the source storage, and is suitable + // for fetching non-leaf nodes like manifests. Since anything fetched from + // fetcher will be cached in the memory, it is recommended to use original + // source storage to fetch large blobs. + // If FindSuccessors is nil, content.Successors will be used. + FindSuccessors func(ctx context.Context, fetcher content.Fetcher, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) +} + +// Copy copies a rooted directed acyclic graph (DAG) with the tagged root node +// in the source Target to the destination Target. +// The destination reference will be the same as the source reference if the +// destination reference is left blank. +// +// Returns the descriptor of the root node on successful copy. 
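+//
+// A minimal sketch (assuming oras.land/oras-go/v2/registry/remote for the
+// destination; the registry name is a placeholder):
+//
+//	src := memory.New()
+//	// ...push and tag a manifest "v1" into src...
+//	dst, err := remote.NewRepository("registry.example.com/hello")
+//	if err != nil {
+//		return err
+//	}
+//	root, err := oras.Copy(ctx, src, "v1", dst, "v1", oras.DefaultCopyOptions)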
+func Copy(ctx context.Context, src ReadOnlyTarget, srcRef string, dst Target, dstRef string, opts CopyOptions) (ocispec.Descriptor, error) {
+	if src == nil {
+		return ocispec.Descriptor{}, errors.New("nil source target")
+	}
+	if dst == nil {
+		return ocispec.Descriptor{}, errors.New("nil destination target")
+	}
+	if dstRef == "" {
+		dstRef = srcRef
+	}
+
+	// use caching proxy on non-leaf nodes
+	if opts.MaxMetadataBytes <= 0 {
+		opts.MaxMetadataBytes = defaultCopyMaxMetadataBytes
+	}
+	proxy := cas.NewProxyWithLimit(src, cas.NewMemory(), opts.MaxMetadataBytes)
+	root, err := resolveRoot(ctx, src, srcRef, proxy)
+	if err != nil {
+		return ocispec.Descriptor{}, fmt.Errorf("failed to resolve %s: %w", srcRef, err)
+	}
+
+	if opts.MapRoot != nil {
+		proxy.StopCaching = true
+		root, err = opts.MapRoot(ctx, proxy, root)
+		if err != nil {
+			return ocispec.Descriptor{}, err
+		}
+		proxy.StopCaching = false
+	}
+
+	if err := prepareCopy(ctx, dst, dstRef, proxy, root, &opts); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if err := copyGraph(ctx, src, dst, root, proxy, nil, nil, opts.CopyGraphOptions); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	return root, nil
+}
+
+// CopyGraph copies a rooted directed acyclic graph (DAG) from the source CAS to
+// the destination CAS.
+func CopyGraph(ctx context.Context, src content.ReadOnlyStorage, dst content.Storage, root ocispec.Descriptor, opts CopyGraphOptions) error {
+	return copyGraph(ctx, src, dst, root, nil, nil, nil, opts)
+}
+
+// copyGraph copies a rooted directed acyclic graph (DAG) from the source CAS to
+// the destination CAS with the specified caching, concurrency limiter and tracker.
+func copyGraph(ctx context.Context, src content.ReadOnlyStorage, dst content.Storage, root ocispec.Descriptor,
+	proxy *cas.Proxy, limiter *semaphore.Weighted, tracker *status.Tracker, opts CopyGraphOptions) error {
+	if proxy == nil {
+		// use caching proxy on non-leaf nodes
+		if opts.MaxMetadataBytes <= 0 {
+			opts.MaxMetadataBytes = defaultCopyMaxMetadataBytes
+		}
+		proxy = cas.NewProxyWithLimit(src, cas.NewMemory(), opts.MaxMetadataBytes)
+	}
+	if limiter == nil {
+		// if Concurrency is not set or invalid, use the default concurrency
+		if opts.Concurrency <= 0 {
+			opts.Concurrency = defaultConcurrency
+		}
+		limiter = semaphore.NewWeighted(int64(opts.Concurrency))
+	}
+	if tracker == nil {
+		// track content status
+		tracker = status.NewTracker()
+	}
+	// if FindSuccessors is not provided, use the default one
+	if opts.FindSuccessors == nil {
+		opts.FindSuccessors = content.Successors
+	}
+
+	// traverse the graph
+	var fn syncutil.GoFunc[ocispec.Descriptor]
+	fn = func(ctx context.Context, region *syncutil.LimitedRegion, desc ocispec.Descriptor) (err error) {
+		// skip the descriptor if another goroutine is working on it
+		done, committed := tracker.TryCommit(desc)
+		if !committed {
+			return nil
+		}
+		defer func() {
+			if err == nil {
+				// mark the content as done on success
+				close(done)
+			}
+		}()
+
+		// skip if a rooted sub-DAG exists
+		exists, err := dst.Exists(ctx, desc)
+		if err != nil {
+			return err
+		}
+		if exists {
+			if opts.OnCopySkipped != nil {
+				if err := opts.OnCopySkipped(ctx, desc); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+
+		// find successors; non-leaf nodes are fetched and cached along the way
+		successors, err := opts.FindSuccessors(ctx, proxy, desc)
+		if err != nil {
+			return err
+		}
+		successors = removeForeignLayers(successors)
+
+		if len(successors) != 0 {
+			// for non-leaf nodes, process successors and wait for them to complete
+			region.End()
+			if err := syncutil.Go(ctx, limiter, fn, successors...); err != nil {
+				return err
+			}
+			for _, node := range successors {
+				done, committed := tracker.TryCommit(node)
+				if committed {
+					return fmt.Errorf("%s: %s: successor not committed", desc.Digest, node.Digest)
+				}
+				select {
+				case <-done:
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+			}
+			if err := region.Start(); err != nil {
+				return err
+			}
+		}
+
+		exists, err = proxy.Cache.Exists(ctx, desc)
+		if err != nil {
+			return err
+		}
+		if exists {
+			return copyNode(ctx, proxy.Cache, dst, desc, opts)
+		}
+		return mountOrCopyNode(ctx, src, dst, desc, opts)
+	}
+
+	return syncutil.Go(ctx, limiter, fn, root)
+}
+
+// mountOrCopyNode tries to mount the node and, if that is not possible,
+// falls back to copying.
+func mountOrCopyNode(ctx context.Context, src content.ReadOnlyStorage, dst content.Storage, desc ocispec.Descriptor, opts CopyGraphOptions) error {
+	// mounting requires MountFrom, and the node must be a blob
+	if opts.MountFrom == nil || descriptor.IsManifest(desc) {
+		return copyNode(ctx, src, dst, desc, opts)
+	}
+
+	mounter, ok := dst.(registry.Mounter)
+	if !ok {
+		// mounting is not supported by the destination
+		return copyNode(ctx, src, dst, desc, opts)
+	}
+
+	sourceRepositories, err := opts.MountFrom(ctx, desc)
+	if err != nil {
+		// Technically this error is not fatal; we could still attempt to copy
+		// the node, but for consistency with the other callbacks we bail out.
+		return err
+	}
+
+	if len(sourceRepositories) == 0 {
+		return copyNode(ctx, src, dst, desc, opts)
+	}
+
+	skipSource := errors.New("skip source")
+	for i, sourceRepository := range sourceRepositories {
+		// try mounting this source repository
+		var mountFailed bool
+		getContent := func() (io.ReadCloser, error) {
+			// the invocation of getContent indicates that mounting has failed
+			mountFailed = true
+
+			if i < len(sourceRepositories)-1 {
+				// If this is not the last one, skip this source and try the next one.
+				// We want to return an error that we will test for from mounter.Mount().
+				return nil, skipSource
+			}
+			// this is the last iteration, so we need to actually get the content
+			// and do the copy, but first call the PreCopy function
+			if opts.PreCopy != nil {
+				if err := opts.PreCopy(ctx, desc); err != nil {
+					return nil, err
+				}
+			}
+			return src.Fetch(ctx, desc)
+		}
+
+		// Mount or copy
+		if err := mounter.Mount(ctx, desc, sourceRepository, getContent); err != nil && !errors.Is(err, skipSource) {
+			return err
+		}
+
+		if !mountFailed {
+			// mounted, success
+			if opts.OnMounted != nil {
+				if err := opts.OnMounted(ctx, desc); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	}
+
+	// we copied it
+	if opts.PostCopy != nil {
+		if err := opts.PostCopy(ctx, desc); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// doCopyNode copies a single content from the source CAS to the destination CAS.
+func doCopyNode(ctx context.Context, src content.ReadOnlyStorage, dst content.Storage, desc ocispec.Descriptor) error {
+	rc, err := src.Fetch(ctx, desc)
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+	err = dst.Push(ctx, desc, rc)
+	if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) {
+		return err
+	}
+	return nil
+}
+
+// copyNode copies a single content from the source CAS to the destination CAS,
+// and applies the given options.
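+//
+// A sketch of the PreCopy/SkipNode contract (alreadyMirrored is a hypothetical
+// predicate; a node may only be skipped if it already exists in the target):
+//
+//	opts := oras.DefaultCopyGraphOptions
+//	opts.PreCopy = func(ctx context.Context, desc ocispec.Descriptor) error {
+//		if alreadyMirrored(desc) {
+//			return oras.SkipNode
+//		}
+//		return nil
+//	}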
+func copyNode(ctx context.Context, src content.ReadOnlyStorage, dst content.Storage, desc ocispec.Descriptor, opts CopyGraphOptions) error { + if opts.PreCopy != nil { + if err := opts.PreCopy(ctx, desc); err != nil { + if err == SkipNode { + return nil + } + return err + } + } + + if err := doCopyNode(ctx, src, dst, desc); err != nil { + return err + } + + if opts.PostCopy != nil { + return opts.PostCopy(ctx, desc) + } + return nil +} + +// copyCachedNodeWithReference copies a single content with a reference from the +// source cache to the destination ReferencePusher. +func copyCachedNodeWithReference(ctx context.Context, src *cas.Proxy, dst registry.ReferencePusher, desc ocispec.Descriptor, dstRef string) error { + rc, err := src.FetchCached(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + err = dst.PushReference(ctx, desc, rc, dstRef) + if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { + return err + } + return nil +} + +// resolveRoot resolves the source reference to the root node. +func resolveRoot(ctx context.Context, src ReadOnlyTarget, srcRef string, proxy *cas.Proxy) (ocispec.Descriptor, error) { + refFetcher, ok := src.(registry.ReferenceFetcher) + if !ok { + return src.Resolve(ctx, srcRef) + } + + // optimize performance for ReferenceFetcher targets + refProxy := ®istryutil.Proxy{ + ReferenceFetcher: refFetcher, + Proxy: proxy, + } + root, rc, err := refProxy.FetchReference(ctx, srcRef) + if err != nil { + return ocispec.Descriptor{}, err + } + defer rc.Close() + // cache root if it is a non-leaf node + fetcher := content.FetcherFunc(func(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + if content.Equal(target, root) { + return rc, nil + } + return nil, errors.New("fetching only root node expected") + }) + if _, err = content.Successors(ctx, fetcher, root); err != nil { + return ocispec.Descriptor{}, err + } + + // TODO: optimize special case where root is a leaf node (i.e. a blob) + // and dst is a ReferencePusher. + return root, nil +} + +// prepareCopy prepares the hooks for copy. 
+func prepareCopy(ctx context.Context, dst Target, dstRef string, proxy *cas.Proxy, root ocispec.Descriptor, opts *CopyOptions) error { + if refPusher, ok := dst.(registry.ReferencePusher); ok { + // optimize performance for ReferencePusher targets + preCopy := opts.PreCopy + opts.PreCopy = func(ctx context.Context, desc ocispec.Descriptor) error { + if preCopy != nil { + if err := preCopy(ctx, desc); err != nil { + return err + } + } + if !content.Equal(desc, root) { + // for non-root node, do nothing + return nil + } + + // for root node, prepare optimized copy + if err := copyCachedNodeWithReference(ctx, proxy, refPusher, desc, dstRef); err != nil { + return err + } + if opts.PostCopy != nil { + if err := opts.PostCopy(ctx, desc); err != nil { + return err + } + } + // skip the regular copy workflow + return SkipNode + } + } else { + postCopy := opts.PostCopy + opts.PostCopy = func(ctx context.Context, desc ocispec.Descriptor) error { + if content.Equal(desc, root) { + // for root node, tag it after copying it + if err := dst.Tag(ctx, root, dstRef); err != nil { + return err + } + } + if postCopy != nil { + return postCopy(ctx, desc) + } + return nil + } + } + + onCopySkipped := opts.OnCopySkipped + opts.OnCopySkipped = func(ctx context.Context, desc ocispec.Descriptor) error { + if !content.Equal(desc, root) { + if onCopySkipped != nil { + return onCopySkipped(ctx, desc) + } + return nil + } + + // enforce tagging when the skipped node is root + if refPusher, ok := dst.(registry.ReferencePusher); ok { + // NOTE: refPusher tags the node by copying it with the reference, + // so onCopySkipped shouldn't be invoked in this case + return copyCachedNodeWithReference(ctx, proxy, refPusher, desc, dstRef) + } + + // invoke onCopySkipped before tagging + if onCopySkipped != nil { + if err := onCopySkipped(ctx, desc); err != nil { + return err + } + } + return dst.Tag(ctx, root, dstRef) + } + + return nil +} + +// removeForeignLayers in-place removes all foreign layers in the given slice. +func removeForeignLayers(descs []ocispec.Descriptor) []ocispec.Descriptor { + var j int + for i, desc := range descs { + if !descriptor.IsForeignLayer(desc) { + if i != j { + descs[j] = desc + } + j++ + } + } + return descs[:j] +} diff --git a/vendor/oras.land/oras-go/v2/errdef/errors.go b/vendor/oras.land/oras-go/v2/errdef/errors.go new file mode 100644 index 000000000000..7adb44b173f8 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/errdef/errors.go @@ -0,0 +1,31 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package errdef
+
+import "errors"
+
+// Common errors used in ORAS
+var (
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrInvalidDigest      = errors.New("invalid digest")
+	ErrInvalidReference   = errors.New("invalid reference")
+	ErrInvalidMediaType   = errors.New("invalid media type")
+	ErrMissingReference   = errors.New("missing reference")
+	ErrNotFound           = errors.New("not found")
+	ErrSizeExceedsLimit   = errors.New("size exceeds limit")
+	ErrUnsupported        = errors.New("unsupported")
+	ErrUnsupportedVersion = errors.New("unsupported version")
+)
diff --git a/vendor/oras.land/oras-go/v2/extendedcopy.go b/vendor/oras.land/oras-go/v2/extendedcopy.go
new file mode 100644
index 000000000000..49b6264ec642
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/extendedcopy.go
@@ -0,0 +1,389 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oras
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"regexp"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/semaphore"
+	"oras.land/oras-go/v2/content"
+	"oras.land/oras-go/v2/internal/cas"
+	"oras.land/oras-go/v2/internal/container/set"
+	"oras.land/oras-go/v2/internal/copyutil"
+	"oras.land/oras-go/v2/internal/descriptor"
+	"oras.land/oras-go/v2/internal/docker"
+	"oras.land/oras-go/v2/internal/spec"
+	"oras.land/oras-go/v2/internal/status"
+	"oras.land/oras-go/v2/internal/syncutil"
+	"oras.land/oras-go/v2/registry"
+)
+
+// DefaultExtendedCopyOptions provides the default ExtendedCopyOptions.
+var DefaultExtendedCopyOptions ExtendedCopyOptions = ExtendedCopyOptions{
+	ExtendedCopyGraphOptions: DefaultExtendedCopyGraphOptions,
+}
+
+// ExtendedCopyOptions contains parameters for [oras.ExtendedCopy].
+type ExtendedCopyOptions struct {
+	ExtendedCopyGraphOptions
+}
+
+// DefaultExtendedCopyGraphOptions provides the default ExtendedCopyGraphOptions.
+var DefaultExtendedCopyGraphOptions ExtendedCopyGraphOptions = ExtendedCopyGraphOptions{
+	CopyGraphOptions: DefaultCopyGraphOptions,
+}
+
+// ExtendedCopyGraphOptions contains parameters for [oras.ExtendedCopyGraph].
+type ExtendedCopyGraphOptions struct {
+	CopyGraphOptions
+	// Depth limits the maximum depth of the directed acyclic graph (DAG) that
+	// will be extended-copied.
+	// If Depth is not specified, or the specified value is less than or
+	// equal to 0, the depth limit is considered infinite.
+	Depth int
+	// FindPredecessors finds the predecessors of the current node.
+	// If FindPredecessors is nil, src.Predecessors will be adapted and used.
+	FindPredecessors func(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) ([]ocispec.Descriptor, error)
+}
+
+// ExtendedCopy copies the directed acyclic graph (DAG) that is reachable from
+// the given tagged node from the source GraphTarget to the destination Target.
+// The destination reference will be the same as the source reference if the
+// destination reference is left blank.
+//
+// Returns the descriptor of the tagged node on successful copy.
+func ExtendedCopy(ctx context.Context, src ReadOnlyGraphTarget, srcRef string, dst Target, dstRef string, opts ExtendedCopyOptions) (ocispec.Descriptor, error) {
+	if src == nil {
+		return ocispec.Descriptor{}, errors.New("nil source graph target")
+	}
+	if dst == nil {
+		return ocispec.Descriptor{}, errors.New("nil destination target")
+	}
+	if dstRef == "" {
+		dstRef = srcRef
+	}
+
+	node, err := src.Resolve(ctx, srcRef)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if err := ExtendedCopyGraph(ctx, src, dst, node, opts.ExtendedCopyGraphOptions); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if err := dst.Tag(ctx, node, dstRef); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	return node, nil
+}
+
+// ExtendedCopyGraph copies the directed acyclic graph (DAG) that is reachable
+// from the given node from the source GraphStorage to the destination Storage.
+func ExtendedCopyGraph(ctx context.Context, src content.ReadOnlyGraphStorage, dst content.Storage, node ocispec.Descriptor, opts ExtendedCopyGraphOptions) error {
+	roots, err := findRoots(ctx, src, node, opts)
+	if err != nil {
+		return err
+	}
+
+	// if Concurrency is not set or invalid, use the default concurrency
+	if opts.Concurrency <= 0 {
+		opts.Concurrency = defaultConcurrency
+	}
+	limiter := semaphore.NewWeighted(int64(opts.Concurrency))
+	// use caching proxy on non-leaf nodes
+	if opts.MaxMetadataBytes <= 0 {
+		opts.MaxMetadataBytes = defaultCopyMaxMetadataBytes
+	}
+	proxy := cas.NewProxyWithLimit(src, cas.NewMemory(), opts.MaxMetadataBytes)
+	// track content status
+	tracker := status.NewTracker()
+
+	// copy the sub-DAGs rooted by the root nodes
+	return syncutil.Go(ctx, limiter, func(ctx context.Context, region *syncutil.LimitedRegion, root ocispec.Descriptor) error {
+		// As a root can be a predecessor of other roots, release the limit here
+		// for dispatching, to avoid deadlocks where predecessor roots are
+		// handled first and are waiting for their successors to complete.
+		region.End()
+		if err := copyGraph(ctx, src, dst, root, proxy, limiter, tracker, opts.CopyGraphOptions); err != nil {
+			return err
+		}
+		return region.Start()
+	}, roots...)
+}
+
+// findRoots finds the root nodes reachable from the given node through a
+// depth-first search.
+func findRoots(ctx context.Context, storage content.ReadOnlyGraphStorage, node ocispec.Descriptor, opts ExtendedCopyGraphOptions) ([]ocispec.Descriptor, error) {
+	visited := set.New[descriptor.Descriptor]()
+	rootMap := make(map[descriptor.Descriptor]ocispec.Descriptor)
+	addRoot := func(key descriptor.Descriptor, val ocispec.Descriptor) {
+		if _, exists := rootMap[key]; !exists {
+			rootMap[key] = val
+		}
+	}
+
+	// if FindPredecessors is not provided, use the default one
+	if opts.FindPredecessors == nil {
+		opts.FindPredecessors = func(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+			return src.Predecessors(ctx, desc)
+		}
+	}
+
+	var stack copyutil.Stack
+	// push the initial node to the stack, set the depth to 0
+	stack.Push(copyutil.NodeInfo{Node: node, Depth: 0})
+	for {
+		current, ok := stack.Pop()
+		if !ok {
+			// empty stack
+			break
+		}
+		currentNode := current.Node
+		currentKey := descriptor.FromOCI(currentNode)
+
+		if visited.Contains(currentKey) {
+			// skip the current node if it has been visited
+			continue
+		}
+		visited.Add(currentKey)
+
+		// stop finding predecessors if the target depth is reached
+		if opts.Depth > 0 && current.Depth == opts.Depth {
+			addRoot(currentKey, currentNode)
+			continue
+		}
+
+		predecessors, err := opts.FindPredecessors(ctx, storage, currentNode)
+		if err != nil {
+			return nil, err
+		}
+
+		// The current node has no predecessor node,
+		// which means it is a root node of a sub-DAG.
+		if len(predecessors) == 0 {
+			addRoot(currentKey, currentNode)
+			continue
+		}
+
+		// The current node has predecessor nodes, which means it is NOT a root node.
+		// Push the predecessor nodes to the stack and keep finding from there.
+		for _, predecessor := range predecessors {
+			predecessorKey := descriptor.FromOCI(predecessor)
+			if !visited.Contains(predecessorKey) {
+				// push the predecessor node with increased depth
+				stack.Push(copyutil.NodeInfo{Node: predecessor, Depth: current.Depth + 1})
+			}
+		}
+	}
+
+	roots := make([]ocispec.Descriptor, 0, len(rootMap))
+	for _, root := range rootMap {
+		roots = append(roots, root)
+	}
+	return roots, nil
+}
+
+// FilterAnnotation configures opts.FindPredecessors to filter the predecessors
+// whose annotation matches a given regex pattern.
+//
+// A predecessor is kept if key is in its annotations and the annotation value
+// matches regex.
+// If regex is nil, predecessors whose annotations contain key will be kept,
+// regardless of the annotation value.
+//
+// For performance consideration, when using both FilterArtifactType and
+// FilterAnnotation, it's recommended to call FilterArtifactType first.
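+//
+// A minimal sketch (the annotation key and pattern are illustrative):
+//
+//	opts := oras.DefaultExtendedCopyGraphOptions
+//	opts.FilterAnnotation("org.example.verified", regexp.MustCompile(`^true$`))
+//	err := oras.ExtendedCopyGraph(ctx, src, dst, node, opts)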
+func (opts *ExtendedCopyGraphOptions) FilterAnnotation(key string, regex *regexp.Regexp) { + keep := func(desc ocispec.Descriptor) bool { + value, ok := desc.Annotations[key] + return ok && (regex == nil || regex.MatchString(value)) + } + + fp := opts.FindPredecessors + opts.FindPredecessors = func(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var predecessors []ocispec.Descriptor + var err error + if fp == nil { + if rf, ok := src.(registry.ReferrerLister); ok { + // if src is a ReferrerLister, use Referrers() for possible memory saving + if err := rf.Referrers(ctx, desc, "", func(referrers []ocispec.Descriptor) error { + // for each page of the results, filter the referrers + for _, r := range referrers { + if keep(r) { + predecessors = append(predecessors, r) + } + } + return nil + }); err != nil { + return nil, err + } + return predecessors, nil + } + predecessors, err = src.Predecessors(ctx, desc) + } else { + predecessors, err = fp(ctx, src, desc) + } + if err != nil { + return nil, err + } + + // Predecessor descriptors that are not from Referrers API are not + // guaranteed to include the annotations of the corresponding manifests. + var kept []ocispec.Descriptor + for _, p := range predecessors { + if p.Annotations == nil { + // If the annotations are not present in the descriptors, + // fetch it from the manifest content. + switch p.MediaType { + case docker.MediaTypeManifest, ocispec.MediaTypeImageManifest, + docker.MediaTypeManifestList, ocispec.MediaTypeImageIndex, + spec.MediaTypeArtifactManifest: + annotations, err := fetchAnnotations(ctx, src, p) + if err != nil { + return nil, err + } + p.Annotations = annotations + } + } + if keep(p) { + kept = append(kept, p) + } + } + return kept, nil + } +} + +// fetchAnnotations fetches the annotations of the manifest described by desc. +func fetchAnnotations(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) (map[string]string, error) { + rc, err := src.Fetch(ctx, desc) + if err != nil { + return nil, err + } + defer rc.Close() + + var manifest struct { + Annotations map[string]string `json:"annotations"` + } + if err := json.NewDecoder(rc).Decode(&manifest); err != nil { + return nil, err + } + if manifest.Annotations == nil { + // to differentiate with nil + return make(map[string]string), nil + } + return manifest.Annotations, nil +} + +// FilterArtifactType configures opts.FindPredecessors to filter the +// predecessors whose artifact type matches a given regex pattern. +// +// A predecessor is kept if its artifact type matches regex. +// If regex is nil, all predecessors will be kept. +// +// For performance consideration, when using both FilterArtifactType and +// FilterAnnotation, it's recommended to call FilterArtifactType first. 
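+//
+// A minimal sketch (the artifact type pattern is illustrative):
+//
+//	opts := oras.DefaultExtendedCopyGraphOptions
+//	opts.FilterArtifactType(regexp.MustCompile(`^application/vnd\.example\.signature`))
+//	err := oras.ExtendedCopyGraph(ctx, src, dst, node, opts)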
+func (opts *ExtendedCopyGraphOptions) FilterArtifactType(regex *regexp.Regexp) { + if regex == nil { + return + } + keep := func(desc ocispec.Descriptor) bool { + return regex.MatchString(desc.ArtifactType) + } + + fp := opts.FindPredecessors + opts.FindPredecessors = func(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var predecessors []ocispec.Descriptor + var err error + if fp == nil { + if rf, ok := src.(registry.ReferrerLister); ok { + // if src is a ReferrerLister, use Referrers() for possible memory saving + if err := rf.Referrers(ctx, desc, "", func(referrers []ocispec.Descriptor) error { + // for each page of the results, filter the referrers + for _, r := range referrers { + if keep(r) { + predecessors = append(predecessors, r) + } + } + return nil + }); err != nil { + return nil, err + } + return predecessors, nil + } + predecessors, err = src.Predecessors(ctx, desc) + } else { + predecessors, err = fp(ctx, src, desc) + } + if err != nil { + return nil, err + } + + // predecessor descriptors that are not from Referrers API are not + // guaranteed to include the artifact type of the corresponding + // manifests. + var kept []ocispec.Descriptor + for _, p := range predecessors { + if p.ArtifactType == "" { + // if the artifact type is not present in the descriptors, + // fetch it from the manifest content. + switch p.MediaType { + case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest: + artifactType, err := fetchArtifactType(ctx, src, p) + if err != nil { + return nil, err + } + p.ArtifactType = artifactType + } + } + if keep(p) { + kept = append(kept, p) + } + } + return kept, nil + } +} + +// fetchArtifactType fetches the artifact type of the manifest described by desc. +func fetchArtifactType(ctx context.Context, src content.ReadOnlyGraphStorage, desc ocispec.Descriptor) (string, error) { + rc, err := src.Fetch(ctx, desc) + if err != nil { + return "", err + } + defer rc.Close() + + switch desc.MediaType { + case spec.MediaTypeArtifactManifest: + var manifest spec.Artifact + if err := json.NewDecoder(rc).Decode(&manifest); err != nil { + return "", err + } + return manifest.ArtifactType, nil + case ocispec.MediaTypeImageManifest: + var manifest ocispec.Manifest + if err := json.NewDecoder(rc).Decode(&manifest); err != nil { + return "", err + } + return manifest.Config.MediaType, nil + default: + return "", nil + } +} diff --git a/vendor/oras.land/oras-go/v2/internal/cas/memory.go b/vendor/oras.land/oras-go/v2/internal/cas/memory.go new file mode 100644 index 000000000000..7e358e136c31 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/cas/memory.go @@ -0,0 +1,88 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cas + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + contentpkg "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/descriptor" +) + +// Memory is a memory based CAS. +type Memory struct { + content sync.Map // map[descriptor.Descriptor][]byte +} + +// NewMemory creates a new Memory CAS. +func NewMemory() *Memory { + return &Memory{} +} + +// Fetch fetches the content identified by the descriptor. +func (m *Memory) Fetch(_ context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + key := descriptor.FromOCI(target) + content, exists := m.content.Load(key) + if !exists { + return nil, fmt.Errorf("%s: %s: %w", key.Digest, key.MediaType, errdef.ErrNotFound) + } + return io.NopCloser(bytes.NewReader(content.([]byte))), nil +} + +// Push pushes the content, matching the expected descriptor. +func (m *Memory) Push(_ context.Context, expected ocispec.Descriptor, content io.Reader) error { + key := descriptor.FromOCI(expected) + + // check if the content exists in advance to avoid reading from the content. + if _, exists := m.content.Load(key); exists { + return fmt.Errorf("%s: %s: %w", key.Digest, key.MediaType, errdef.ErrAlreadyExists) + } + + // read and try to store the content. + value, err := contentpkg.ReadAll(content, expected) + if err != nil { + return err + } + if _, exists := m.content.LoadOrStore(key, value); exists { + return fmt.Errorf("%s: %s: %w", key.Digest, key.MediaType, errdef.ErrAlreadyExists) + } + return nil +} + +// Exists returns true if the described content exists. +func (m *Memory) Exists(_ context.Context, target ocispec.Descriptor) (bool, error) { + key := descriptor.FromOCI(target) + _, exists := m.content.Load(key) + return exists, nil +} + +// Map dumps the memory into a built-in map structure. +// Like other operations, calling Map() is go-routine safe. However, it does not +// necessarily correspond to any consistent snapshot of the storage contents. +func (m *Memory) Map() map[descriptor.Descriptor][]byte { + res := make(map[descriptor.Descriptor][]byte) + m.content.Range(func(key, value interface{}) bool { + res[key.(descriptor.Descriptor)] = value.([]byte) + return true + }) + return res +} diff --git a/vendor/oras.land/oras-go/v2/internal/cas/proxy.go b/vendor/oras.land/oras-go/v2/internal/cas/proxy.go new file mode 100644 index 000000000000..ada5f94e0083 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/cas/proxy.go @@ -0,0 +1,125 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cas + +import ( + "context" + "io" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/internal/ioutil" +) + +// Proxy is a caching proxy for the storage. +// The first fetch call of a described content will read from the remote and +// cache the fetched content. 
+// The subsequent fetch call will read from the local cache. +type Proxy struct { + content.ReadOnlyStorage + Cache content.Storage + StopCaching bool +} + +// NewProxy creates a proxy for the `base` storage, using the `cache` storage as +// the cache. +func NewProxy(base content.ReadOnlyStorage, cache content.Storage) *Proxy { + return &Proxy{ + ReadOnlyStorage: base, + Cache: cache, + } +} + +// NewProxyWithLimit creates a proxy for the `base` storage, using the `cache` +// storage with a push size limit as the cache. +func NewProxyWithLimit(base content.ReadOnlyStorage, cache content.Storage, pushLimit int64) *Proxy { + limitedCache := content.LimitStorage(cache, pushLimit) + return &Proxy{ + ReadOnlyStorage: base, + Cache: limitedCache, + } +} + +// Fetch fetches the content identified by the descriptor. +func (p *Proxy) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + if p.StopCaching { + return p.FetchCached(ctx, target) + } + + rc, err := p.Cache.Fetch(ctx, target) + if err == nil { + return rc, nil + } + + rc, err = p.ReadOnlyStorage.Fetch(ctx, target) + if err != nil { + return nil, err + } + pr, pw := io.Pipe() + var wg sync.WaitGroup + wg.Add(1) + var pushErr error + go func() { + defer wg.Done() + pushErr = p.Cache.Push(ctx, target, pr) + if pushErr != nil { + pr.CloseWithError(pushErr) + } + }() + closer := ioutil.CloserFunc(func() error { + rcErr := rc.Close() + if err := pw.Close(); err != nil { + return err + } + wg.Wait() + if pushErr != nil { + return pushErr + } + return rcErr + }) + + return struct { + io.Reader + io.Closer + }{ + Reader: io.TeeReader(rc, pw), + Closer: closer, + }, nil +} + +// FetchCached fetches the content identified by the descriptor. +// If the content is not cached, it will be fetched from the remote without +// caching. +func (p *Proxy) FetchCached(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + exists, err := p.Cache.Exists(ctx, target) + if err != nil { + return nil, err + } + if exists { + return p.Cache.Fetch(ctx, target) + } + return p.ReadOnlyStorage.Fetch(ctx, target) +} + +// Exists returns true if the described content exists. +func (p *Proxy) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) { + exists, err := p.Cache.Exists(ctx, target) + if err == nil && exists { + return true, nil + } + return p.ReadOnlyStorage.Exists(ctx, target) +} diff --git a/vendor/oras.land/oras-go/v2/internal/container/set/set.go b/vendor/oras.land/oras-go/v2/internal/container/set/set.go new file mode 100644 index 000000000000..07c96d47d15b --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/container/set/set.go @@ -0,0 +1,40 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package set + +// Set represents a set data structure. +type Set[T comparable] map[T]struct{} + +// New returns an initialized set. +func New[T comparable]() Set[T] { + return make(Set[T]) +} + +// Add adds item into the set s. 
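+//
+// A minimal sketch (internal package, shown for illustration):
+//
+//	s := set.New[string]()
+//	s.Add("v1")
+//	_ = s.Contains("v1") // true
+//	s.Delete("v1")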
+func (s Set[T]) Add(item T) { + s[item] = struct{}{} +} + +// Contains returns true if the set s contains item. +func (s Set[T]) Contains(item T) bool { + _, ok := s[item] + return ok +} + +// Delete deletes an item from the set. +func (s Set[T]) Delete(item T) { + delete(s, item) +} diff --git a/vendor/oras.land/oras-go/v2/internal/copyutil/stack.go b/vendor/oras.land/oras-go/v2/internal/copyutil/stack.go new file mode 100644 index 000000000000..69412b0099be --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/copyutil/stack.go @@ -0,0 +1,55 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package copyutil + +import ( + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NodeInfo represents information of a node that is being visited in +// ExtendedCopy. +type NodeInfo struct { + // Node represents a node in the graph. + Node ocispec.Descriptor + // Depth represents the depth of the node in the graph. + Depth int +} + +// Stack represents a stack data structure that is used in ExtendedCopy for +// storing node information. +type Stack []NodeInfo + +// IsEmpty returns true if the stack is empty, otherwise returns false. +func (s *Stack) IsEmpty() bool { + return len(*s) == 0 +} + +// Push pushes an item to the stack. +func (s *Stack) Push(i NodeInfo) { + *s = append(*s, i) +} + +// Pop pops the top item out of the stack. +func (s *Stack) Pop() (NodeInfo, bool) { + if s.IsEmpty() { + return NodeInfo{}, false + } + + last := len(*s) - 1 + top := (*s)[last] + *s = (*s)[:last] + return top, true +} diff --git a/vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go b/vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go new file mode 100644 index 000000000000..b9b339c0487b --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go @@ -0,0 +1,89 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package descriptor + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/spec" +) + +// DefaultMediaType is the media type used when no media type is specified. +const DefaultMediaType string = "application/octet-stream" + +// Descriptor contains the minimum information to describe the disposition of +// targeted content. +// Since it only has strings and integers, Descriptor is a comparable struct.
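+// Being comparable, it can be used directly as a map key, for example +// (sketch; `desc` is an ocispec.Descriptor and `blob` a []byte in scope): +// +//	seen := make(map[Descriptor][]byte) +//	seen[FromOCI(desc)] = blob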
+type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` +} + +// Empty is an empty descriptor +var Empty Descriptor + +// FromOCI shrinks the OCI descriptor to the minimum. +func FromOCI(desc ocispec.Descriptor) Descriptor { + return Descriptor{ + MediaType: desc.MediaType, + Digest: desc.Digest, + Size: desc.Size, + } +} + +// IsForeignLayer checks if a descriptor describes a foreign layer. +func IsForeignLayer(desc ocispec.Descriptor) bool { + switch desc.MediaType { + case ocispec.MediaTypeImageLayerNonDistributable, + ocispec.MediaTypeImageLayerNonDistributableGzip, + ocispec.MediaTypeImageLayerNonDistributableZstd, + docker.MediaTypeForeignLayer: + return true + default: + return false + } +} + +// IsManifest checks if a descriptor describes a manifest. +func IsManifest(desc ocispec.Descriptor) bool { + switch desc.MediaType { + case docker.MediaTypeManifest, + docker.MediaTypeManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, + spec.MediaTypeArtifactManifest: + return true + default: + return false + } +} + +// Plain returns a plain descriptor that contains only MediaType, Digest and +// Size. +func Plain(desc ocispec.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: desc.MediaType, + Digest: desc.Digest, + Size: desc.Size, + } +} diff --git a/vendor/oras.land/oras-go/v2/internal/docker/mediatype.go b/vendor/oras.land/oras-go/v2/internal/docker/mediatype.go new file mode 100644 index 000000000000..76a4ba9ed9d5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/docker/mediatype.go @@ -0,0 +1,24 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +// docker media types +const ( + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) diff --git a/vendor/oras.land/oras-go/v2/internal/graph/memory.go b/vendor/oras.land/oras-go/v2/internal/graph/memory.go new file mode 100644 index 000000000000..016e5f96a850 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/graph/memory.go @@ -0,0 +1,201 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package graph + +import ( + "context" + "errors" + "sync" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/container/set" + "oras.land/oras-go/v2/internal/descriptor" + "oras.land/oras-go/v2/internal/status" + "oras.land/oras-go/v2/internal/syncutil" +) + +// Memory is a memory based PredecessorFinder. +type Memory struct { + // nodes has the following properties and behaviors: + // 1. a node exists in Memory.nodes if and only if it exists in the memory + // 2. Memory.nodes saves the ocispec.Descriptor map keys, which are used by + // the other fields. + nodes map[descriptor.Descriptor]ocispec.Descriptor + + // predecessors has the following properties and behaviors: + // 1. a node exists in Memory.predecessors if it has at least one predecessor + // in the memory, regardless of whether or not the node itself exists in + // the memory. + // 2. a node does not exist in Memory.predecessors, if it doesn't have any predecessors + // in the memory. + predecessors map[descriptor.Descriptor]set.Set[descriptor.Descriptor] + + // successors has the following properties and behaviors: + // 1. a node exists in Memory.successors if and only if it exists in the memory. + // 2. a node's entry in Memory.successors is always consistent with the actual + // content of the node, regardless of whether or not each successor exists + // in the memory. + successors map[descriptor.Descriptor]set.Set[descriptor.Descriptor] + + lock sync.RWMutex +} + +// NewMemory creates a new memory PredecessorFinder. +func NewMemory() *Memory { + return &Memory{ + nodes: make(map[descriptor.Descriptor]ocispec.Descriptor), + predecessors: make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]), + successors: make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]), + } +} + +// Index indexes predecessors for each direct successor of the given node. +func (m *Memory) Index(ctx context.Context, fetcher content.Fetcher, node ocispec.Descriptor) error { + _, err := m.index(ctx, fetcher, node) + return err +} + +// IndexAll indexes predecessors for all the successors of the given node. +func (m *Memory) IndexAll(ctx context.Context, fetcher content.Fetcher, node ocispec.Descriptor) error { + // track content status + tracker := status.NewTracker() + var fn syncutil.GoFunc[ocispec.Descriptor] + fn = func(ctx context.Context, region *syncutil.LimitedRegion, desc ocispec.Descriptor) error { + // skip the node if another goroutine is working on it + _, committed := tracker.TryCommit(desc) + if !committed { + return nil + } + successors, err := m.index(ctx, fetcher, desc) + if err != nil { + if errors.Is(err, errdef.ErrNotFound) { + // skip the node if it does not exist + return nil + } + return err + } + if len(successors) > 0 { + // traverse and index successors + return syncutil.Go(ctx, nil, fn, successors...) + } + return nil + } + return syncutil.Go(ctx, nil, fn, node) +} + +// Predecessors returns the nodes directly pointing to the current node. +// Predecessors returns nil without error if the node does not exist in the +// store. Like other operations, calling Predecessors() is go-routine safe. +// However, it does not necessarily correspond to any consistent snapshot of +// the stored contents.
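+// A typical indexing flow, as a sketch (`fetcher` is any content.Fetcher; +// `manifestDesc` and `layerDesc` are placeholders): +// +//	g := NewMemory() +//	_ = g.IndexAll(ctx, fetcher, manifestDesc) // index the graph under a manifest +//	preds, _ := g.Predecessors(ctx, layerDesc) // manifests pointing at the layer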
+func (m *Memory) Predecessors(_ context.Context, node ocispec.Descriptor) ([]ocispec.Descriptor, error) { + m.lock.RLock() + defer m.lock.RUnlock() + + key := descriptor.FromOCI(node) + set, exists := m.predecessors[key] + if !exists { + return nil, nil + } + var res []ocispec.Descriptor + for k := range set { + res = append(res, m.nodes[k]) + } + return res, nil +} + +// Remove removes the node from its predecessors and successors, and returns the +// dangling root nodes caused by the deletion. +func (m *Memory) Remove(node ocispec.Descriptor) []ocispec.Descriptor { + m.lock.Lock() + defer m.lock.Unlock() + + nodeKey := descriptor.FromOCI(node) + var danglings []ocispec.Descriptor + // remove the node from its successors' predecessor list + for successorKey := range m.successors[nodeKey] { + predecessorEntry := m.predecessors[successorKey] + predecessorEntry.Delete(nodeKey) + + // if none of the predecessors of the node still exists, we remove the + // predecessors entry and return it as a dangling node. Otherwise, we do + // not remove the entry. + if len(predecessorEntry) == 0 { + delete(m.predecessors, successorKey) + if _, exists := m.nodes[successorKey]; exists { + danglings = append(danglings, m.nodes[successorKey]) + } + } + } + delete(m.successors, nodeKey) + delete(m.nodes, nodeKey) + return danglings +} + +// DigestSet returns the set of node digests in memory. +func (m *Memory) DigestSet() set.Set[digest.Digest] { + m.lock.RLock() + defer m.lock.RUnlock() + + s := set.New[digest.Digest]() + for desc := range m.nodes { + s.Add(desc.Digest) + } + return s +} + +// index indexes predecessors for each direct successor of the given node. +func (m *Memory) index(ctx context.Context, fetcher content.Fetcher, node ocispec.Descriptor) ([]ocispec.Descriptor, error) { + successors, err := content.Successors(ctx, fetcher, node) + if err != nil { + return nil, err + } + m.lock.Lock() + defer m.lock.Unlock() + + // index the node + nodeKey := descriptor.FromOCI(node) + m.nodes[nodeKey] = node + + // for each successor, put it into the node's successors list, and + // put the node into the successor's predecessors list + successorSet := set.New[descriptor.Descriptor]() + m.successors[nodeKey] = successorSet + for _, successor := range successors { + successorKey := descriptor.FromOCI(successor) + successorSet.Add(successorKey) + predecessorSet, exists := m.predecessors[successorKey] + if !exists { + predecessorSet = set.New[descriptor.Descriptor]() + m.predecessors[successorKey] = predecessorSet + } + predecessorSet.Add(nodeKey) + } + return successors, nil +} + +// Exists checks if the node exists in the graph. +func (m *Memory) Exists(node ocispec.Descriptor) bool { + m.lock.RLock() + defer m.lock.RUnlock() + + nodeKey := descriptor.FromOCI(node) + _, exists := m.nodes[nodeKey] + return exists +} diff --git a/vendor/oras.land/oras-go/v2/internal/httputil/seek.go b/vendor/oras.land/oras-go/v2/internal/httputil/seek.go new file mode 100644 index 000000000000..3fa14e2dd351 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/httputil/seek.go @@ -0,0 +1,116 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httputil + +import ( + "errors" + "fmt" + "io" + "net/http" +) + +// Client is an interface for an HTTP client. +// This interface is defined inside this package to prevent a potential import +// loop. +type Client interface { + // Do sends an HTTP request and returns an HTTP response. + Do(*http.Request) (*http.Response, error) +} + +// readSeekCloser seeks http body by starting new connections. +type readSeekCloser struct { + client Client + req *http.Request + rc io.ReadCloser + size int64 + offset int64 + closed bool +} + +// NewReadSeekCloser returns a seeker to make the HTTP response seekable. +// Callers should ensure that the server supports Range requests. +func NewReadSeekCloser(client Client, req *http.Request, respBody io.ReadCloser, size int64) io.ReadSeekCloser { + return &readSeekCloser{ + client: client, + req: req, + rc: respBody, + size: size, + } +} + +// Read reads the content body and counts the offset. +func (rsc *readSeekCloser) Read(p []byte) (n int, err error) { + if rsc.closed { + return 0, errors.New("read: already closed") + } + n, err = rsc.rc.Read(p) + rsc.offset += int64(n) + return +} + +// Seek starts a new connection to the remote for reading if the position +// changes. +func (rsc *readSeekCloser) Seek(offset int64, whence int) (int64, error) { + if rsc.closed { + return 0, errors.New("seek: already closed") + } + switch whence { + case io.SeekCurrent: + offset += rsc.offset + case io.SeekStart: + // no-op + case io.SeekEnd: + offset += rsc.size + default: + return 0, errors.New("seek: invalid whence") + } + if offset < 0 { + return 0, errors.New("seek: an attempt was made to move the pointer before the beginning of the content") + } + if offset == rsc.offset { + return offset, nil + } + if offset >= rsc.size { + rsc.rc.Close() + rsc.rc = http.NoBody + rsc.offset = offset + return offset, nil + } + + req := rsc.req.Clone(rsc.req.Context()) + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, rsc.size-1)) + resp, err := rsc.client.Do(req) + if err != nil { + return 0, fmt.Errorf("seek: %s %q: %w", req.Method, req.URL, err) + } + if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return 0, fmt.Errorf("seek: %s %q: unexpected status code %d", resp.Request.Method, resp.Request.URL, resp.StatusCode) + } + + rsc.rc.Close() + rsc.rc = resp.Body + rsc.offset = offset + return offset, nil +} + +// Close closes the content body. +func (rsc *readSeekCloser) Close() error { + if rsc.closed { + return nil + } + rsc.closed = true + return rsc.rc.Close() +} diff --git a/vendor/oras.land/oras-go/v2/internal/interfaces/registry.go b/vendor/oras.land/oras-go/v2/internal/interfaces/registry.go new file mode 100644 index 000000000000..05600148e6bf --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/interfaces/registry.go @@ -0,0 +1,24 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interfaces + +import "oras.land/oras-go/v2/registry" + +// ReferenceParser provides reference parsing. +type ReferenceParser interface { + // ParseReference parses a reference to a fully qualified reference. + ParseReference(reference string) (registry.Reference, error) +} diff --git a/vendor/oras.land/oras-go/v2/internal/ioutil/io.go b/vendor/oras.land/oras-go/v2/internal/ioutil/io.go new file mode 100644 index 000000000000..de41bda909f8 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/ioutil/io.go @@ -0,0 +1,66 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutil + +import ( + "fmt" + "io" + "reflect" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" +) + +// CloserFunc is the basic Close method defined in io.Closer. +type CloserFunc func() error + +// Close performs close operation by the CloserFunc. +func (fn CloserFunc) Close() error { + return fn() +} + +// CopyBuffer copies from src to dst through the provided buffer +// until either EOF is reached on src, or an error occurs. +// The copied content is verified against the size and the digest. +func CopyBuffer(dst io.Writer, src io.Reader, buf []byte, desc ocispec.Descriptor) error { + // verify while copying + vr := content.NewVerifyReader(src, desc) + if _, err := io.CopyBuffer(dst, vr, buf); err != nil { + return fmt.Errorf("copy failed: %w", err) + } + return vr.Verify() +} + +// Types returned by `io.NopCloser()`. +var ( + nopCloserType = reflect.TypeOf(io.NopCloser(nil)) + nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct { + io.Reader + io.WriterTo + }{})) +) + +// UnwrapNopCloser unwraps the reader wrapped by `io.NopCloser()`. +// Similar implementation can be found in the built-in package `net/http`. +// Reference: https://github.com/golang/go/blob/go1.22.1/src/net/http/transfer.go#L1090-L1105 +func UnwrapNopCloser(r io.Reader) io.Reader { + switch reflect.TypeOf(r) { + case nopCloserType, nopCloserWriterToType: + return reflect.ValueOf(r).Field(0).Interface().(io.Reader) + default: + return r + } +} diff --git a/vendor/oras.land/oras-go/v2/internal/manifestutil/parser.go b/vendor/oras.land/oras-go/v2/internal/manifestutil/parser.go new file mode 100644 index 000000000000..89d556b8fec0 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/manifestutil/parser.go @@ -0,0 +1,84 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manifestutil + +import ( + "context" + "encoding/json" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/spec" +) + +// Config returns the config of desc, if present. +func Config(ctx context.Context, fetcher content.Fetcher, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + switch desc.MediaType { + case docker.MediaTypeManifest, ocispec.MediaTypeImageManifest: + content, err := content.FetchAll(ctx, fetcher, desc) + if err != nil { + return nil, err + } + // OCI manifest schema can be used to marshal docker manifest + var manifest ocispec.Manifest + if err := json.Unmarshal(content, &manifest); err != nil { + return nil, err + } + return &manifest.Config, nil + default: + return nil, nil + } +} + +// Manifests returns the manifests of desc, if present. +func Manifests(ctx context.Context, fetcher content.Fetcher, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case docker.MediaTypeManifestList, ocispec.MediaTypeImageIndex: + content, err := content.FetchAll(ctx, fetcher, desc) + if err != nil { + return nil, err + } + // OCI manifest index schema can be used to marshal docker manifest list + var index ocispec.Index + if err := json.Unmarshal(content, &index); err != nil { + return nil, err + } + return index.Manifests, nil + default: + return nil, nil + } +} + +// Subject returns the subject of desc, if present. +func Subject(ctx context.Context, fetcher content.Fetcher, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex, spec.MediaTypeArtifactManifest: + content, err := content.FetchAll(ctx, fetcher, desc) + if err != nil { + return nil, err + } + var manifest struct { + Subject *ocispec.Descriptor `json:"subject,omitempty"` + } + if err := json.Unmarshal(content, &manifest); err != nil { + return nil, err + } + return manifest.Subject, nil + default: + return nil, nil + } +} diff --git a/vendor/oras.land/oras-go/v2/internal/platform/platform.go b/vendor/oras.land/oras-go/v2/internal/platform/platform.go new file mode 100644 index 000000000000..3aea3a1b10d0 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/platform/platform.go @@ -0,0 +1,145 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package platform + +import ( + "context" + "encoding/json" + "fmt" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/manifestutil" +) + +// Match checks whether the current platform matches the target platform. +// Match will return true if all of the following conditions are met. +// - Architecture and OS exactly match. +// - Variant and OSVersion exactly match if provided by the target platform. +// - OSFeatures of the target platform are a subset of the OSFeatures +// array of the current platform. +// +// Note: Variant, OSVersion and OSFeatures are optional fields; the comparison +// is skipped if the target platform does not provide a specific value. +func Match(got *ocispec.Platform, want *ocispec.Platform) bool { + if got == nil && want == nil { + return true + } + + if got == nil || want == nil { + return false + } + + if got.Architecture != want.Architecture || got.OS != want.OS { + return false + } + + if want.OSVersion != "" && got.OSVersion != want.OSVersion { + return false + } + + if want.Variant != "" && got.Variant != want.Variant { + return false + } + + if len(want.OSFeatures) != 0 && !isSubset(want.OSFeatures, got.OSFeatures) { + return false + } + + return true +} + +// isSubset returns true if all items in slice A are present in slice B. +func isSubset(a, b []string) bool { + set := make(map[string]bool, len(b)) + for _, v := range b { + set[v] = true + } + for _, v := range a { + if _, ok := set[v]; !ok { + return false + } + } + + return true +} + +// SelectManifest implements a platform filter and returns the descriptor of +// the first matching manifest if the root is a manifest list. If the root is +// a manifest, the root descriptor is returned if the platform matches. +func SelectManifest(ctx context.Context, src content.ReadOnlyStorage, root ocispec.Descriptor, p *ocispec.Platform) (ocispec.Descriptor, error) { + switch root.MediaType { + case docker.MediaTypeManifestList, ocispec.MediaTypeImageIndex: + manifests, err := manifestutil.Manifests(ctx, src, root) + if err != nil { + return ocispec.Descriptor{}, err + } + + // platform filter + for _, m := range manifests { + if Match(m.Platform, p) { + return m, nil + } + } + return ocispec.Descriptor{}, fmt.Errorf("%s: %w: no matching manifest was found in the manifest list", root.Digest, errdef.ErrNotFound) + case docker.MediaTypeManifest, ocispec.MediaTypeImageManifest: + // config will be non-nil for docker manifest and OCI image manifest + config, err := manifestutil.Config(ctx, src, root) + if err != nil { + return ocispec.Descriptor{}, err + } + + configMediaType := docker.MediaTypeConfig + if root.MediaType == ocispec.MediaTypeImageManifest { + configMediaType = ocispec.MediaTypeImageConfig + } + cfgPlatform, err := getPlatformFromConfig(ctx, src, *config, configMediaType) + if err != nil { + return ocispec.Descriptor{}, err + } + + if Match(cfgPlatform, p) { + return root, nil + } + return ocispec.Descriptor{}, fmt.Errorf("%s: %w: platform in manifest does not match target platform", root.Digest, errdef.ErrNotFound) + default: + return ocispec.Descriptor{}, fmt.Errorf("%s: %s: %w", root.Digest, root.MediaType, errdef.ErrUnsupported) + } +} + +// getPlatformFromConfig returns a platform object which is made up from the +// fields in the config blob.
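+// Note that decoding tolerates io.EOF, so an empty config blob yields a +// zero-value platform instead of an error. Callers normally reach this +// through SelectManifest, for example (sketch; `store` and `root` are +// placeholders): +// +//	desc, err := SelectManifest(ctx, store, root, +//		&ocispec.Platform{OS: "linux", Architecture: "amd64"})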
+func getPlatformFromConfig(ctx context.Context, src content.ReadOnlyStorage, desc ocispec.Descriptor, targetConfigMediaType string) (*ocispec.Platform, error) { + if desc.MediaType != targetConfigMediaType { + return nil, fmt.Errorf("fail to recognize platform from unknown config %s: expect %s", desc.MediaType, targetConfigMediaType) + } + + rc, err := src.Fetch(ctx, desc) + if err != nil { + return nil, err + } + defer rc.Close() + + var platform ocispec.Platform + if err = json.NewDecoder(rc).Decode(&platform); err != nil && err != io.EOF { + return nil, err + } + + return &platform, nil +} diff --git a/vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go b/vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go new file mode 100644 index 000000000000..c238713e5f72 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go @@ -0,0 +1,102 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registryutil + +import ( + "context" + "io" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/internal/cas" + "oras.land/oras-go/v2/internal/ioutil" + "oras.land/oras-go/v2/registry" +) + +// ReferenceStorage represents a CAS that supports registry.ReferenceFetcher. +type ReferenceStorage interface { + content.ReadOnlyStorage + registry.ReferenceFetcher +} + +// Proxy is a caching proxy dedicated for registry.ReferenceFetcher. +// The first fetch call of a described content will read from the remote and +// cache the fetched content. +// The subsequent fetch call will read from the local cache. +type Proxy struct { + registry.ReferenceFetcher + *cas.Proxy +} + +// NewProxy creates a proxy for the `base` ReferenceStorage, using the `cache` +// storage as the cache. +func NewProxy(base ReferenceStorage, cache content.Storage) *Proxy { + return &Proxy{ + ReferenceFetcher: base, + Proxy: cas.NewProxy(base, cache), + } +} + +// FetchReference fetches the content identified by the reference from the +// remote and cache the fetched content. 
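+// As with cas.Proxy.Fetch, the returned reader tees the fetched bytes into +// the cache through an io.Pipe; the caller must drain and close it for the +// cache write to complete.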
+func (p *Proxy) FetchReference(ctx context.Context, reference string) (ocispec.Descriptor, io.ReadCloser, error) { + target, rc, err := p.ReferenceFetcher.FetchReference(ctx, reference) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + // skip caching if the content already exists in cache + exists, err := p.Cache.Exists(ctx, target) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + if exists { + return target, rc, nil + } + + // cache content while reading + pr, pw := io.Pipe() + var wg sync.WaitGroup + wg.Add(1) + var pushErr error + go func() { + defer wg.Done() + pushErr = p.Cache.Push(ctx, target, pr) + if pushErr != nil { + pr.CloseWithError(pushErr) + } + }() + closer := ioutil.CloserFunc(func() error { + rcErr := rc.Close() + if err := pw.Close(); err != nil { + return err + } + wg.Wait() + if pushErr != nil { + return pushErr + } + return rcErr + }) + + return target, struct { + io.Reader + io.Closer + }{ + Reader: io.TeeReader(rc, pw), + Closer: closer, + }, nil +} diff --git a/vendor/oras.land/oras-go/v2/internal/resolver/memory.go b/vendor/oras.land/oras-go/v2/internal/resolver/memory.go new file mode 100644 index 000000000000..092a29e97bb5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/resolver/memory.go @@ -0,0 +1,104 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "context" + "maps" + "sync" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/container/set" +) + +// Memory is a memory based resolver. +type Memory struct { + lock sync.RWMutex + index map[string]ocispec.Descriptor + tags map[digest.Digest]set.Set[string] +} + +// NewMemory creates a new Memory resolver. +func NewMemory() *Memory { + return &Memory{ + index: make(map[string]ocispec.Descriptor), + tags: make(map[digest.Digest]set.Set[string]), + } +} + +// Resolve resolves a reference to a descriptor. +func (m *Memory) Resolve(_ context.Context, reference string) (ocispec.Descriptor, error) { + m.lock.RLock() + defer m.lock.RUnlock() + + desc, ok := m.index[reference] + if !ok { + return ocispec.Descriptor{}, errdef.ErrNotFound + } + return desc, nil +} + +// Tag tags a descriptor with a reference string. +func (m *Memory) Tag(_ context.Context, desc ocispec.Descriptor, reference string) error { + m.lock.Lock() + defer m.lock.Unlock() + + m.index[reference] = desc + tagSet, ok := m.tags[desc.Digest] + if !ok { + tagSet = set.New[string]() + m.tags[desc.Digest] = tagSet + } + tagSet.Add(reference) + return nil +} + +// Untag removes a reference from index map. 
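+// The full tag lifecycle, as a sketch: +// +//	r := NewMemory() +//	_ = r.Tag(ctx, desc, "v1") +//	d, _ := r.Resolve(ctx, "v1") // d == desc +//	r.Untag("v1")                // "v1" no longer resolves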
+func (m *Memory) Untag(reference string) { + m.lock.Lock() + defer m.lock.Unlock() + + desc, ok := m.index[reference] + if !ok { + return + } + delete(m.index, reference) + tagSet := m.tags[desc.Digest] + tagSet.Delete(reference) + if len(tagSet) == 0 { + delete(m.tags, desc.Digest) + } +} + +// Map dumps the memory into a built-in map structure. +// Like other operations, calling Map() is go-routine safe. +func (m *Memory) Map() map[string]ocispec.Descriptor { + m.lock.RLock() + defer m.lock.RUnlock() + + return maps.Clone(m.index) +} + +// TagSet returns the set of tags of the descriptor. +func (m *Memory) TagSet(desc ocispec.Descriptor) set.Set[string] { + m.lock.RLock() + defer m.lock.RUnlock() + + tagSet := m.tags[desc.Digest] + return maps.Clone(tagSet) +} diff --git a/vendor/oras.land/oras-go/v2/internal/spec/artifact.go b/vendor/oras.land/oras-go/v2/internal/spec/artifact.go new file mode 100644 index 000000000000..7f801fd9caf0 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/spec/artifact.go @@ -0,0 +1,57 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec + +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" + +const ( + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) + +// MediaTypeArtifactManifest specifies the media type for a content descriptor. +const MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" + +// Artifact describes an artifact manifest. +// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. +// +// This manifest type was introduced in image-spec v1.1.0-rc1 and was removed in +// image-spec v1.1.0-rc3. It is not part of the current image-spec and is kept +// here for Go compatibility. +// +// Reference: https://github.com/opencontainers/image-spec/pull/999 +type Artifact struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the IANA media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []ocispec.Descriptor `json:"blobs,omitempty"` + + // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. 
+ Subject *ocispec.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/oras.land/oras-go/v2/internal/status/tracker.go b/vendor/oras.land/oras-go/v2/internal/status/tracker.go new file mode 100644 index 000000000000..1a48bb5aff69 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/status/tracker.go @@ -0,0 +1,43 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/internal/descriptor" +) + +// Tracker tracks content status described by a descriptor. +type Tracker struct { + status sync.Map // map[descriptor.Descriptor]chan struct{} +} + +// NewTracker creates a new content status tracker. +func NewTracker() *Tracker { + return &Tracker{} +} + +// TryCommit tries to commit the work for the target descriptor. +// Returns true if committed. A channel is also returned for sending +// notifications. Once the work is done, the channel should be closed. +// Returns false if the work is done or still in progress. +func (t *Tracker) TryCommit(target ocispec.Descriptor) (chan struct{}, bool) { + key := descriptor.FromOCI(target) + status, exists := t.status.LoadOrStore(key, make(chan struct{})) + return status.(chan struct{}), !exists +} diff --git a/vendor/oras.land/oras-go/v2/internal/syncutil/limit.go b/vendor/oras.land/oras-go/v2/internal/syncutil/limit.go new file mode 100644 index 000000000000..2a05d4ea28b6 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/syncutil/limit.go @@ -0,0 +1,84 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import ( + "context" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +// LimitedRegion provides a way to bound concurrent access to a code block. +type LimitedRegion struct { + ctx context.Context + limiter *semaphore.Weighted + ended bool +} + +// LimitRegion creates a new LimitedRegion. +func LimitRegion(ctx context.Context, limiter *semaphore.Weighted) *LimitedRegion { + if limiter == nil { + return nil + } + return &LimitedRegion{ + ctx: ctx, + limiter: limiter, + ended: true, + } +} + +// Start starts the region with concurrency limit. 
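+// Start and End are typically paired, as in this sketch (`sem` is a shared +// *semaphore.Weighted; a nil region is a no-op): +// +//	lr := LimitRegion(ctx, sem) +//	if err := lr.Start(); err != nil { +//		return err +//	} +//	defer lr.End()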
+func (lr *LimitedRegion) Start() error { + if lr == nil || !lr.ended { + return nil + } + if err := lr.limiter.Acquire(lr.ctx, 1); err != nil { + return err + } + lr.ended = false + return nil +} + +// End ends the region with concurrency limit. +func (lr *LimitedRegion) End() { + if lr == nil || lr.ended { + return + } + lr.limiter.Release(1) + lr.ended = true +} + +// GoFunc represents a function that can be invoked by Go. +type GoFunc[T any] func(ctx context.Context, region *LimitedRegion, t T) error + +// Go concurrently invokes fn on items. +func Go[T any](ctx context.Context, limiter *semaphore.Weighted, fn GoFunc[T], items ...T) error { + eg, egCtx := errgroup.WithContext(ctx) + for _, item := range items { + region := LimitRegion(ctx, limiter) + if err := region.Start(); err != nil { + return err + } + eg.Go(func(t T) func() error { + return func() error { + defer region.End() + return fn(egCtx, region, t) + } + }(item)) + } + return eg.Wait() +} diff --git a/vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go b/vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go new file mode 100644 index 000000000000..1071bedc9030 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go @@ -0,0 +1,67 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import ( + "context" + + "golang.org/x/sync/errgroup" +) + +// A LimitedGroup is a collection of goroutines working on subtasks that are part of +// the same overall task. +type LimitedGroup struct { + grp *errgroup.Group + ctx context.Context +} + +// LimitGroup returns a new LimitedGroup and an associated Context derived from ctx. +// +// The number of active goroutines in this group is limited to the given limit. +// A negative value indicates no limit. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func LimitGroup(ctx context.Context, limit int) (*LimitedGroup, context.Context) { + grp, ctx := errgroup.WithContext(ctx) + grp.SetLimit(limit) + return &LimitedGroup{grp: grp, ctx: ctx}, ctx +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context. +// After which, any subsequent calls to Go will not execute their given function. +// The error will be returned by Wait. +func (g *LimitedGroup) Go(f func() error) { + g.grp.Go(func() error { + select { + case <-g.ctx.Done(): + return g.ctx.Err() + default: + return f() + } + }) +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. 
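+// A usage sketch (`items` and `process` are placeholders): +// +//	g, ctx := LimitGroup(ctx, 4) +//	for _, item := range items { +//		g.Go(func() error { return process(ctx, item) }) +//	} +//	return g.Wait()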
+func (g *LimitedGroup) Wait() error { + return g.grp.Wait() +} diff --git a/vendor/oras.land/oras-go/v2/internal/syncutil/merge.go b/vendor/oras.land/oras-go/v2/internal/syncutil/merge.go new file mode 100644 index 000000000000..44788990c205 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/syncutil/merge.go @@ -0,0 +1,140 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import "sync" + +// mergeStatus represents the merge status of an item. +type mergeStatus struct { + // main indicates if items are being merged by the current go-routine. + main bool + // err represents the error of the merge operation. + err error +} + +// Merge represents merge operations on items. +// The state transfer is shown as below: +// +// +----------+ +// | Start +--------+-------------+ +// +----+-----+ | | +// | | | +// v v v +// +----+-----+ +----+----+ +----+----+ +// +-------+ Prepare +<--+ Pending +-->+ Waiting | +// | +----+-----+ +---------+ +----+----+ +// | | | +// | v | +// | + ---+---- + | +// On Error | Resolve | | +// | + ---+---- + | +// | | | +// | v | +// | +----+-----+ | +// +------>+ Complete +<---------------------+ +// +----+-----+ +// | +// v +// +----+-----+ +// | End | +// +----------+ +type Merge[T any] struct { + lock sync.Mutex + committed bool + items []T + status chan mergeStatus + pending []T + pendingStatus chan mergeStatus +} + +// Do merges concurrent operations of items into a single call of prepare and +// resolve. +// If Do is called multiple times concurrently, only one of the calls will be +// selected to invoke prepare and resolve. +func (m *Merge[T]) Do(item T, prepare func() error, resolve func(items []T) error) error { + status := <-m.assign(item) + if status.main { + err := prepare() + items := m.commit() + if err == nil { + err = resolve(items) + } + m.complete(err) + return err + } + return status.err +} + +// assign adds a new item into the item list. +func (m *Merge[T]) assign(item T) <-chan mergeStatus { + m.lock.Lock() + defer m.lock.Unlock() + + if m.committed { + if m.pendingStatus == nil { + m.pendingStatus = make(chan mergeStatus, 1) + } + m.pending = append(m.pending, item) + return m.pendingStatus + } + + if m.status == nil { + m.status = make(chan mergeStatus, 1) + m.status <- mergeStatus{main: true} + } + m.items = append(m.items, item) + return m.status +} + +// commit closes the assignment window, and the assigned items will be ready +// for resolve. +func (m *Merge[T]) commit() []T { + m.lock.Lock() + defer m.lock.Unlock() + + m.committed = true + return m.items +} + +// complete completes the previous merge, and moves the pending items to the +// stage for the next merge. 
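+// On success, the status channel is closed, so every waiter receives the +// zero mergeStatus (no error); on failure, the error is sent to each of the +// len(items)-1 non-main waiters individually.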
+func (m *Merge[T]) complete(err error) { + // notify results + if err == nil { + close(m.status) + } else { + remaining := len(m.items) - 1 + status := m.status + for remaining > 0 { + status <- mergeStatus{err: err} + remaining-- + } + } + + // move pending items to the stage + m.lock.Lock() + defer m.lock.Unlock() + + m.committed = false + m.items = m.pending + m.status = m.pendingStatus + m.pending = nil + m.pendingStatus = nil + + if m.status != nil { + m.status <- mergeStatus{main: true} + } +} diff --git a/vendor/oras.land/oras-go/v2/internal/syncutil/once.go b/vendor/oras.land/oras-go/v2/internal/syncutil/once.go new file mode 100644 index 000000000000..e449705304e1 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/syncutil/once.go @@ -0,0 +1,102 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import ( + "context" + "sync" + "sync/atomic" +) + +// Once is an object that will perform exactly one action. +// Unlike sync.Once, this Once allows the action to have return values. +type Once struct { + result interface{} + err error + status chan bool +} + +// NewOnce creates a new Once instance. +func NewOnce() *Once { + status := make(chan bool, 1) + status <- true + return &Once{ + status: status, + } +} + +// Do calls the function f if and only if Do is being called for the first +// time, or all previous calls were cancelled, exceeded their deadline, or +// panicked. +// When `once.Do(ctx, f)` is called multiple times, the return value of the +// first call of the function f is stored, and is directly returned for other +// calls. +// Besides the return value of the function f, including the error, Do returns +// true if the function f passed was called first and was not cancelled, did +// not exceed its deadline, and did not panic. Otherwise, it returns false. +func (o *Once) Do(ctx context.Context, f func() (interface{}, error)) (bool, interface{}, error) { + defer func() { + if r := recover(); r != nil { + o.status <- true + panic(r) + } + }() + for { + select { + case inProgress := <-o.status: + if !inProgress { + return false, o.result, o.err + } + result, err := f() + if err == context.Canceled || err == context.DeadlineExceeded { + o.status <- true + return false, nil, err + } + o.result, o.err = result, err + close(o.status) + return true, result, err + case <-ctx.Done(): + return false, nil, ctx.Err() + } + } +} + +// OnceOrRetry is an object that will perform exactly one successful action. +type OnceOrRetry struct { + done atomic.Bool + lock sync.Mutex +} + +// Do calls the function f if and only if it is being called for the first +// time for this instance of OnceOrRetry, or all previous calls to Do have +// failed.
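+// A sketch of retry-until-success initialization (`connect` is a placeholder +// func() error): +// +//	var once OnceOrRetry +//	err := once.Do(connect) // later callers retry until one call succeeds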
+func (o *OnceOrRetry) Do(f func() error) error { + // fast path + if o.done.Load() { + return nil + } + + // slow path + o.lock.Lock() + defer o.lock.Unlock() + + if o.done.Load() { + return nil + } + if err := f(); err != nil { + return err + } + o.done.Store(true) + return nil +} diff --git a/vendor/oras.land/oras-go/v2/internal/syncutil/pool.go b/vendor/oras.land/oras-go/v2/internal/syncutil/pool.go new file mode 100644 index 000000000000..6fb4a69c51ad --- /dev/null +++ b/vendor/oras.land/oras-go/v2/internal/syncutil/pool.go @@ -0,0 +1,64 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import "sync" + +// poolItem represents an item in Pool. +type poolItem[T any] struct { + value T + refCount int +} + +// Pool is a scalable pool with items identified by keys. +type Pool[T any] struct { + // New optionally specifies a function to generate a value when Get would + // otherwise return nil. + // It may not be changed concurrently with calls to Get. + New func() T + + lock sync.Mutex + items map[any]*poolItem[T] +} + +// Get gets the value identified by key. +// The caller should invoke the returned function after using the returned item. +func (p *Pool[T]) Get(key any) (*T, func()) { + p.lock.Lock() + defer p.lock.Unlock() + + item, ok := p.items[key] + if !ok { + if p.items == nil { + p.items = make(map[any]*poolItem[T]) + } + item = &poolItem[T]{} + if p.New != nil { + item.value = p.New() + } + p.items[key] = item + } + item.refCount++ + + return &item.value, func() { + p.lock.Lock() + defer p.lock.Unlock() + item.refCount-- + if item.refCount <= 0 { + delete(p.items, key) + } + } +} diff --git a/vendor/oras.land/oras-go/v2/pack.go b/vendor/oras.land/oras-go/v2/pack.go new file mode 100644 index 000000000000..1b9956129072 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/pack.go @@ -0,0 +1,439 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oras + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "maps" + "regexp" + "time" + + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/spec" +) + +const ( + // MediaTypeUnknownConfig is the default config mediaType used + // - for [Pack] when PackOptions.PackImageManifest is true and + // PackOptions.ConfigDescriptor is not specified. 
+ // - for [PackManifest] when packManifestVersion is PackManifestVersion1_0 + // and PackManifestOptions.ConfigDescriptor is not specified. + MediaTypeUnknownConfig = "application/vnd.unknown.config.v1+json" + + // MediaTypeUnknownArtifact is the default artifactType used for [Pack] + // when PackOptions.PackImageManifest is false and artifactType is + // not specified. + MediaTypeUnknownArtifact = "application/vnd.unknown.artifact.v1" +) + +var ( + // ErrInvalidDateTimeFormat is returned by [Pack] and [PackManifest] when + // AnnotationArtifactCreated or AnnotationCreated is provided, but its value + // is not in RFC 3339 format. + // Reference: https://www.rfc-editor.org/rfc/rfc3339#section-5.6 + ErrInvalidDateTimeFormat = errors.New("invalid date and time format") + + // ErrMissingArtifactType is returned by [PackManifest] when + // packManifestVersion is PackManifestVersion1_1 and artifactType is + // empty and the config media type is set to + // "application/vnd.oci.empty.v1+json". + ErrMissingArtifactType = errors.New("missing artifact type") +) + +// PackManifestVersion represents the manifest version used for [PackManifest]. +type PackManifestVersion int + +const ( + // PackManifestVersion1_0 represents the OCI Image Manifest defined in + // image-spec v1.0.2. + // Reference: https://github.com/opencontainers/image-spec/blob/v1.0.2/manifest.md + PackManifestVersion1_0 PackManifestVersion = 1 + + // PackManifestVersion1_1_RC4 represents the OCI Image Manifest defined + // in image-spec v1.1.0-rc4. + // Reference: https://github.com/opencontainers/image-spec/blob/v1.1.0-rc4/manifest.md + // + // Deprecated: This constant is deprecated and not recommended for future use. + // Use [PackManifestVersion1_1] instead. + PackManifestVersion1_1_RC4 PackManifestVersion = PackManifestVersion1_1 + + // PackManifestVersion1_1 represents the OCI Image Manifest defined in + // image-spec v1.1.0. + // Reference: https://github.com/opencontainers/image-spec/blob/v1.1.0/manifest.md + PackManifestVersion1_1 PackManifestVersion = 2 +) + +// PackManifestOptions contains optional parameters for [PackManifest]. +type PackManifestOptions struct { + // Subject is the subject of the manifest. + // This option is only valid when PackManifestVersion is + // NOT PackManifestVersion1_0. + Subject *ocispec.Descriptor + + // Layers is the layers of the manifest. + Layers []ocispec.Descriptor + + // ManifestAnnotations is the annotation map of the manifest. + ManifestAnnotations map[string]string + + // ConfigDescriptor is a pointer to the descriptor of the config blob. + // If not nil, ConfigAnnotations will be ignored. + ConfigDescriptor *ocispec.Descriptor + + // ConfigAnnotations is the annotation map of the config descriptor. + // This option is valid only when ConfigDescriptor is nil. + ConfigAnnotations map[string]string +} + +// mediaTypeRegexp checks the format of media types. +// References: +// - https://github.com/opencontainers/image-spec/blob/v1.1.0/schema/defs-descriptor.json#L7 +// - https://datatracker.ietf.org/doc/html/rfc6838#section-4.2 +var mediaTypeRegexp = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}/[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}$`) + +// PackManifest generates an OCI Image Manifest based on the given parameters +// and pushes the packed manifest to a content storage using pusher. The version +// of the manifest to be packed is determined by packManifestVersion +// (Recommended value: PackManifestVersion1_1). 
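+// +// For example (sketch; `pusher` is any content.Pusher and `layers` is a +// []ocispec.Descriptor in scope): +// +//	desc, err := PackManifest(ctx, pusher, PackManifestVersion1_1, +//		"application/vnd.example+type", PackManifestOptions{Layers: layers})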
+//
+// - If packManifestVersion is [PackManifestVersion1_1]:
+//   artifactType MUST NOT be empty unless opts.ConfigDescriptor is specified.
+// - If packManifestVersion is [PackManifestVersion1_0]:
+//   if opts.ConfigDescriptor is nil, artifactType will be used as the
+//   config media type; if artifactType is empty,
+//   "application/vnd.unknown.config.v1+json" will be used.
+//   if opts.ConfigDescriptor is NOT nil, artifactType will be ignored.
+//
+// artifactType and opts.ConfigDescriptor.MediaType MUST comply with RFC 6838.
+//
+// On success, returns a descriptor of the packed manifest.
+func PackManifest(ctx context.Context, pusher content.Pusher, packManifestVersion PackManifestVersion, artifactType string, opts PackManifestOptions) (ocispec.Descriptor, error) {
+	switch packManifestVersion {
+	case PackManifestVersion1_0:
+		return packManifestV1_0(ctx, pusher, artifactType, opts)
+	case PackManifestVersion1_1:
+		return packManifestV1_1(ctx, pusher, artifactType, opts)
+	default:
+		return ocispec.Descriptor{}, fmt.Errorf("PackManifestVersion(%v): %w", packManifestVersion, errdef.ErrUnsupported)
+	}
+}
+
+// PackOptions contains optional parameters for [Pack].
+//
+// Deprecated: This type is deprecated and not recommended for future use.
+// Use [PackManifestOptions] instead.
+type PackOptions struct {
+	// Subject is the subject of the manifest.
+	Subject *ocispec.Descriptor
+
+	// ManifestAnnotations is the annotation map of the manifest.
+	ManifestAnnotations map[string]string
+
+	// PackImageManifest controls whether to pack an OCI Image Manifest or not.
+	// - If true, pack an OCI Image Manifest.
+	// - If false, pack an OCI Artifact Manifest (deprecated).
+	//
+	// Default value: false.
+	PackImageManifest bool
+
+	// ConfigDescriptor is a pointer to the descriptor of the config blob.
+	// If not nil, artifactType will be implied by the mediaType of the
+	// specified ConfigDescriptor, and ConfigAnnotations will be ignored.
+	// This option is valid only when PackImageManifest is true.
+	ConfigDescriptor *ocispec.Descriptor
+
+	// ConfigAnnotations is the annotation map of the config descriptor.
+	// This option is valid only when PackImageManifest is true
+	// and ConfigDescriptor is nil.
+	ConfigAnnotations map[string]string
+}
+
+// Pack packs the given blobs, generates a manifest for the pack,
+// and pushes it to a content storage.
+//
+// When opts.PackImageManifest is true, artifactType will be used as the
+// config descriptor mediaType of the image manifest.
+//
+// On success, returns a descriptor of the manifest.
+//
+// Deprecated: This method is deprecated and not recommended for future use.
+// Use [PackManifest] instead.
+func Pack(ctx context.Context, pusher content.Pusher, artifactType string, blobs []ocispec.Descriptor, opts PackOptions) (ocispec.Descriptor, error) {
+	if opts.PackImageManifest {
+		return packManifestV1_1_RC2(ctx, pusher, artifactType, blobs, opts)
+	}
+	return packArtifact(ctx, pusher, artifactType, blobs, opts)
+}
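
For illustration only, and not part of the vendored code: a minimal sketch of calling PackManifest against an in-memory store. The memory store package (oras.land/oras-go/v2/content/memory) and the example artifact type are assumptions of this sketch.

package main

import (
	"context"
	"fmt"

	oras "oras.land/oras-go/v2"
	"oras.land/oras-go/v2/content/memory"
)

func main() {
	ctx := context.Background()
	store := memory.New() // an in-memory content.Pusher

	// With no layers and no config descriptor, packManifestV1_1 falls back
	// to the empty JSON descriptor for both, so a non-empty artifactType is
	// mandatory for PackManifestVersion1_1.
	desc, err := oras.PackManifest(ctx, store, oras.PackManifestVersion1_1,
		"application/vnd.example.artifact.v1", oras.PackManifestOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("packed:", desc.Digest)
}

+// packArtifact packs an Artifact manifest as defined in image-spec v1.1.0-rc2.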
+// Reference: https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/artifact.md +func packArtifact(ctx context.Context, pusher content.Pusher, artifactType string, blobs []ocispec.Descriptor, opts PackOptions) (ocispec.Descriptor, error) { + if artifactType == "" { + artifactType = MediaTypeUnknownArtifact + } + + annotations, err := ensureAnnotationCreated(opts.ManifestAnnotations, spec.AnnotationArtifactCreated) + if err != nil { + return ocispec.Descriptor{}, err + } + manifest := spec.Artifact{ + MediaType: spec.MediaTypeArtifactManifest, + ArtifactType: artifactType, + Blobs: blobs, + Subject: opts.Subject, + Annotations: annotations, + } + return pushManifest(ctx, pusher, manifest, manifest.MediaType, manifest.ArtifactType, manifest.Annotations) +} + +// packManifestV1_0 packs an image manifest defined in image-spec v1.0.2. +// Reference: https://github.com/opencontainers/image-spec/blob/v1.0.2/manifest.md +func packManifestV1_0(ctx context.Context, pusher content.Pusher, artifactType string, opts PackManifestOptions) (ocispec.Descriptor, error) { + if opts.Subject != nil { + return ocispec.Descriptor{}, fmt.Errorf("subject is not supported for manifest version %v: %w", PackManifestVersion1_0, errdef.ErrUnsupported) + } + + // prepare config + var configDesc ocispec.Descriptor + if opts.ConfigDescriptor != nil { + if err := validateMediaType(opts.ConfigDescriptor.MediaType); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("invalid config mediaType format: %w", err) + } + configDesc = *opts.ConfigDescriptor + } else { + if artifactType == "" { + artifactType = MediaTypeUnknownConfig + } else if err := validateMediaType(artifactType); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("invalid artifactType format: %w", err) + } + var err error + configDesc, err = pushCustomEmptyConfig(ctx, pusher, artifactType, opts.ConfigAnnotations) + if err != nil { + return ocispec.Descriptor{}, err + } + } + + annotations, err := ensureAnnotationCreated(opts.ManifestAnnotations, ocispec.AnnotationCreated) + if err != nil { + return ocispec.Descriptor{}, err + } + if opts.Layers == nil { + opts.Layers = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. does not pertain to OCI or docker version + }, + Config: configDesc, + MediaType: ocispec.MediaTypeImageManifest, + Layers: opts.Layers, + Annotations: annotations, + } + return pushManifest(ctx, pusher, manifest, manifest.MediaType, manifest.Config.MediaType, manifest.Annotations) +} + +// packManifestV1_1_RC2 packs an image manifest as defined in image-spec +// v1.1.0-rc2. 
+// Reference: https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/manifest.md +func packManifestV1_1_RC2(ctx context.Context, pusher content.Pusher, configMediaType string, layers []ocispec.Descriptor, opts PackOptions) (ocispec.Descriptor, error) { + if configMediaType == "" { + configMediaType = MediaTypeUnknownConfig + } + + // prepare config + var configDesc ocispec.Descriptor + if opts.ConfigDescriptor != nil { + configDesc = *opts.ConfigDescriptor + } else { + var err error + configDesc, err = pushCustomEmptyConfig(ctx, pusher, configMediaType, opts.ConfigAnnotations) + if err != nil { + return ocispec.Descriptor{}, err + } + } + + annotations, err := ensureAnnotationCreated(opts.ManifestAnnotations, ocispec.AnnotationCreated) + if err != nil { + return ocispec.Descriptor{}, err + } + if layers == nil { + layers = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. does not pertain to OCI or docker version + }, + Config: configDesc, + MediaType: ocispec.MediaTypeImageManifest, + Layers: layers, + Subject: opts.Subject, + Annotations: annotations, + } + return pushManifest(ctx, pusher, manifest, manifest.MediaType, manifest.Config.MediaType, manifest.Annotations) +} + +// packManifestV1_1 packs an image manifest defined in image-spec v1.1.0. +// Reference: https://github.com/opencontainers/image-spec/blob/v1.1.0/manifest.md#guidelines-for-artifact-usage +func packManifestV1_1(ctx context.Context, pusher content.Pusher, artifactType string, opts PackManifestOptions) (ocispec.Descriptor, error) { + if artifactType == "" && (opts.ConfigDescriptor == nil || opts.ConfigDescriptor.MediaType == ocispec.MediaTypeEmptyJSON) { + // artifactType MUST be set when config.mediaType is set to the empty value + return ocispec.Descriptor{}, ErrMissingArtifactType + } + if artifactType != "" { + if err := validateMediaType(artifactType); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("invalid artifactType format: %w", err) + } + } + + // prepare config + var emptyBlobExists bool + var configDesc ocispec.Descriptor + if opts.ConfigDescriptor != nil { + if err := validateMediaType(opts.ConfigDescriptor.MediaType); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("invalid config mediaType format: %w", err) + } + configDesc = *opts.ConfigDescriptor + } else { + // use the empty descriptor for config + configDesc = ocispec.DescriptorEmptyJSON + configDesc.Annotations = opts.ConfigAnnotations + configBytes := ocispec.DescriptorEmptyJSON.Data + // push config + if err := pushIfNotExist(ctx, pusher, configDesc, configBytes); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to push config: %w", err) + } + emptyBlobExists = true + } + + annotations, err := ensureAnnotationCreated(opts.ManifestAnnotations, ocispec.AnnotationCreated) + if err != nil { + return ocispec.Descriptor{}, err + } + if len(opts.Layers) == 0 { + // use the empty descriptor as the single layer + layerDesc := ocispec.DescriptorEmptyJSON + layerData := ocispec.DescriptorEmptyJSON.Data + if !emptyBlobExists { + if err := pushIfNotExist(ctx, pusher, layerDesc, layerData); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to push layer: %w", err) + } + } + opts.Layers = []ocispec.Descriptor{layerDesc} + } + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. 
does not pertain to OCI or docker version + }, + Config: configDesc, + MediaType: ocispec.MediaTypeImageManifest, + Layers: opts.Layers, + Subject: opts.Subject, + ArtifactType: artifactType, + Annotations: annotations, + } + return pushManifest(ctx, pusher, manifest, manifest.MediaType, manifest.ArtifactType, manifest.Annotations) +} + +// pushIfNotExist pushes data described by desc if it does not exist in the +// target. +func pushIfNotExist(ctx context.Context, pusher content.Pusher, desc ocispec.Descriptor, data []byte) error { + if ros, ok := pusher.(content.ReadOnlyStorage); ok { + exists, err := ros.Exists(ctx, desc) + if err != nil { + return fmt.Errorf("failed to check existence: %s: %s: %w", desc.Digest.String(), desc.MediaType, err) + } + if exists { + return nil + } + } + + if err := pusher.Push(ctx, desc, bytes.NewReader(data)); err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { + return fmt.Errorf("failed to push: %s: %s: %w", desc.Digest.String(), desc.MediaType, err) + } + return nil +} + +// pushManifest marshals manifest into JSON bytes and pushes it. +func pushManifest(ctx context.Context, pusher content.Pusher, manifest any, mediaType string, artifactType string, annotations map[string]string) (ocispec.Descriptor, error) { + manifestJSON, err := json.Marshal(manifest) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err) + } + manifestDesc := content.NewDescriptorFromBytes(mediaType, manifestJSON) + // populate ArtifactType and Annotations of the manifest into manifestDesc + manifestDesc.ArtifactType = artifactType + manifestDesc.Annotations = annotations + // push manifest + if err := pusher.Push(ctx, manifestDesc, bytes.NewReader(manifestJSON)); err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { + return ocispec.Descriptor{}, fmt.Errorf("failed to push manifest: %w", err) + } + return manifestDesc, nil +} + +// pushCustomEmptyConfig generates and pushes an empty config blob. +func pushCustomEmptyConfig(ctx context.Context, pusher content.Pusher, mediaType string, annotations map[string]string) (ocispec.Descriptor, error) { + // Use an empty JSON object here, because some registries may not accept + // empty config blob. + // As of September 2022, GAR is known to return 400 on empty blob upload. + // See https://github.com/oras-project/oras-go/issues/294 for details. + configBytes := []byte("{}") + configDesc := content.NewDescriptorFromBytes(mediaType, configBytes) + configDesc.Annotations = annotations + // push config + if err := pushIfNotExist(ctx, pusher, configDesc, configBytes); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to push config: %w", err) + } + return configDesc, nil +} + +// ensureAnnotationCreated ensures that annotationCreatedKey is in annotations, +// and that its value conforms to RFC 3339. Otherwise returns a new annotation +// map with annotationCreatedKey created. 
+func ensureAnnotationCreated(annotations map[string]string, annotationCreatedKey string) (map[string]string, error) {
+	if createdTime, ok := annotations[annotationCreatedKey]; ok {
+		// if annotationCreatedKey is provided, validate its format
+		if _, err := time.Parse(time.RFC3339, createdTime); err != nil {
+			return nil, fmt.Errorf("%w: %v", ErrInvalidDateTimeFormat, err)
+		}
+		return annotations, nil
+	}
+
+	// copy the original annotation map
+	copied := make(map[string]string, len(annotations)+1)
+	maps.Copy(copied, annotations)
+
+	// set creation time in RFC 3339 format
+	// reference: https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/annotations.md#pre-defined-annotation-keys
+	now := time.Now().UTC()
+	copied[annotationCreatedKey] = now.Format(time.RFC3339)
+	return copied, nil
+}
+
+// validateMediaType validates the format of mediaType.
+func validateMediaType(mediaType string) error {
+	if !mediaTypeRegexp.MatchString(mediaType) {
+		return fmt.Errorf("%s: %w", mediaType, errdef.ErrInvalidMediaType)
+	}
+	return nil
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/reference.go b/vendor/oras.land/oras-go/v2/registry/reference.go
new file mode 100644
index 000000000000..fc3e95e5599f
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/reference.go
@@ -0,0 +1,276 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+	"oras.land/oras-go/v2/errdef"
+)
+
+// regular expressions for components.
+var (
+	// repositoryRegexp is adapted from the distribution implementation. The
+	// repository name set under OCI distribution spec is a subset of the docker
+	// spec. For maximum compatibility, the docker spec is verified client-side.
+	// Further checks are left to the server-side.
+	//
+	// References:
+	// - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53
+	// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pulling-manifests
+	repositoryRegexp = regexp.MustCompile(`^[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*)*$`)
+
+	// tagRegexp checks the tag name.
+	// The docker and OCI spec have the same regular expression.
+	//
+	// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pulling-manifests
+	tagRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
+)
+
+// Reference references either a resource descriptor (where Reference.Reference
+// is a tag or a digest), or a resource repository (where Reference.Reference
+// is the empty string).
+type Reference struct {
+	// Registry is the name of the registry. It is usually the domain name of
+	// the registry optionally with a port.
+	Registry string
+
+	// Repository is the name of the repository.
+	Repository string
+
+	// Reference is the reference of the object in the repository. This field
+	// can take any one of the four valid forms (see ParseReference). In the
+	// case where it's the empty string, it necessarily implies valid form D,
+	// and where it is non-empty, then it is either a tag, or a digest
+	// (implying one of valid forms A, B, or C).
+	Reference string
+}
+
+// ParseReference parses a string (artifact) into an `artifact reference`.
+// Corresponding cryptographic hash implementations are required to be imported
+// as specified by https://pkg.go.dev/github.com/opencontainers/go-digest#readme-usage
+// if the string contains a digest.
+//
+// Note: An "image" is an "artifact", however, an "artifact" is not necessarily
+// an "image".
+//
+// The token `artifact` is composed of other tokens, and those in turn are
+// composed of others. This recursive definition requires a notation capable
+// of recursion, thus the following two forms have been adopted:
+//
+//  1. Backus–Naur Form (BNF) has been adopted to address the recursive nature
+//     of the definition.
+//  2. Token opacity is revealed via its label letter-casing. That is, "opaque"
+//     tokens (i.e., tokens that are not final, and must therefore be further
+//     broken down into their constituents) are denoted in *lowercase*, while
+//     final tokens (i.e., leaf-node tokens that are final) are denoted in
+//     *uppercase*.
+//
+// Finally, note that a number of the opaque tokens are polymorphic in nature;
+// that is, they can take on one of numerous forms, not restricted to a single
+// defining form.
+//
+// The top-level token, `artifact`, is composed of two (opaque) tokens; namely
+// `socketaddr` and `path`:
+//
+//	<artifact> ::= <socketaddr> "/" <path>
+//
+// The former is described as follows:
+//
+//	<socketaddr> ::= <host> | <host> ":" <port>
+//	<host>       ::= <ip> | <fqdn>
+//	<ip>         ::= <ipv4> | <ipv6>
+//
+// The latter, which is of greater interest here, is described as follows:
+//
+//	<path>      ::= <repository> | <repository> <reference>
+//	<reference> ::= "@" <digest> | ":" <tag> "@" <digest> | ":" <tag>
+//	<digest>    ::= <algorithm> ":" <hash>
+//
+// This second token--`path`--can take on exactly four forms, each of which will
+// now be illustrated:
+//
+//	<--- path --------------------------------------------> | - Decode `path`
+//	<=== REPOSITORY ===> <--- reference ------------------> | - Decode `reference`
+//	<=== REPOSITORY ===> @ <=================== digest ===> | - Valid Form A
+//	<=== REPOSITORY ===> : <!!! TAG !!!> @ <=== digest ===> | - Valid Form B (tag is dropped)
+//	<=== REPOSITORY ===> : <=== TAG ======================> | - Valid Form C
+//	<=== REPOSITORY ======================================> | - Valid Form D
+//
+// Note: In the case of Valid Form B, TAG is dropped without any validation or
+// further consideration.
+func ParseReference(artifact string) (Reference, error) {
+	parts := strings.SplitN(artifact, "/", 2)
+	if len(parts) == 1 {
+		// Invalid Form
+		return Reference{}, fmt.Errorf("%w: missing registry or repository", errdef.ErrInvalidReference)
+	}
+	registry, path := parts[0], parts[1]
+
+	var isTag bool
+	var repository string
+	var reference string
+	if index := strings.Index(path, "@"); index != -1 {
+		// `digest` found; Valid Form A (if not B)
+		isTag = false
+		repository = path[:index]
+		reference = path[index+1:]
+
+		if index = strings.Index(repository, ":"); index != -1 {
+			// `tag` found (and now dropped without validation) since the
+			// `digest` is already present; Valid Form B
+			repository = repository[:index]
+		}
+	} else if index = strings.Index(path, ":"); index != -1 {
+		// `tag` found; Valid Form C
+		isTag = true
+		repository = path[:index]
+		reference = path[index+1:]
+	} else {
+		// empty `reference`; Valid Form D
+		repository = path
+	}
+	ref := Reference{
+		Registry:   registry,
+		Repository: repository,
+		Reference:  reference,
+	}
+
+	if err := ref.ValidateRegistry(); err != nil {
+		return Reference{}, err
+	}
+
+	if err := ref.ValidateRepository(); err != nil {
+		return Reference{}, err
+	}
+
+	if len(ref.Reference) == 0 {
+		return ref, nil
+	}
+
+	validator := ref.ValidateReferenceAsDigest
+	if isTag {
+		validator = ref.ValidateReferenceAsTag
+	}
+	if err := validator(); err != nil {
+		return Reference{}, err
+	}
+
+	return ref, nil
+}
+
+// Validate validates the entire reference object: the registry, the
+// repository, and the reference.
+func (r Reference) Validate() error {
+	if err := r.ValidateRegistry(); err != nil {
+		return err
+	}
+
+	if err := r.ValidateRepository(); err != nil {
+		return err
+	}
+
+	return r.ValidateReference()
+}
+
+// ValidateRegistry validates the registry.
+func (r Reference) ValidateRegistry() error {
+	if uri, err := url.ParseRequestURI("dummy://" + r.Registry); err != nil || uri.Host == "" || uri.Host != r.Registry {
+		return fmt.Errorf("%w: invalid registry %q", errdef.ErrInvalidReference, r.Registry)
+	}
+	return nil
+}
+
+// ValidateRepository validates the repository.
+func (r Reference) ValidateRepository() error {
+	if !repositoryRegexp.MatchString(r.Repository) {
+		return fmt.Errorf("%w: invalid repository %q", errdef.ErrInvalidReference, r.Repository)
+	}
+	return nil
+}
+
+// ValidateReferenceAsTag validates the reference as a tag.
+func (r Reference) ValidateReferenceAsTag() error {
+	if !tagRegexp.MatchString(r.Reference) {
+		return fmt.Errorf("%w: invalid tag %q", errdef.ErrInvalidReference, r.Reference)
+	}
+	return nil
+}
+
+// ValidateReferenceAsDigest validates the reference as a digest.
+func (r Reference) ValidateReferenceAsDigest() error {
+	if _, err := r.Digest(); err != nil {
+		return fmt.Errorf("%w: invalid digest %q: %v", errdef.ErrInvalidReference, r.Reference, err)
+	}
+	return nil
+}
+
+// ValidateReference validates the reference, where the reference is first
+// tried as an empty string, then as a digest, and if that fails, as a tag.
+func (r Reference) ValidateReference() error {
+	if len(r.Reference) == 0 {
+		return nil
+	}
+
+	if index := strings.IndexByte(r.Reference, ':'); index != -1 {
+		return r.ValidateReferenceAsDigest()
+	}
+
+	return r.ValidateReferenceAsTag()
+}
+
+// Host returns the host name of the registry.
+func (r Reference) Host() string {
+	if r.Registry == "docker.io" {
+		return "registry-1.docker.io"
+	}
+	return r.Registry
+}
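
For illustration only, and not part of the vendored code: a short sketch of ParseReference on a placeholder registry and repository, showing which of the four valid forms it decodes.

package main

import (
	"fmt"

	"oras.land/oras-go/v2/registry"
)

func main() {
	ref, err := registry.ParseReference("registry.example.com/hello-world:v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Registry)   // "registry.example.com"
	fmt.Println(ref.Repository) // "hello-world"
	fmt.Println(ref.Reference)  // "v1" (a tag; Valid Form C)

	// Valid Form A would be "registry.example.com/hello-world@sha256:..."
	// (a digest); Valid Form D is "registry.example.com/hello-world",
	// leaving ref.Reference empty.
}

+// ReferenceOrDefault returns the reference or the default reference if empty.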
+func (r Reference) ReferenceOrDefault() string {
+	if r.Reference == "" {
+		return "latest"
+	}
+	return r.Reference
+}
+
+// Digest returns the reference as a digest.
+// Corresponding cryptographic hash implementations are required to be imported
+// as specified by https://pkg.go.dev/github.com/opencontainers/go-digest#readme-usage
+func (r Reference) Digest() (digest.Digest, error) {
+	return digest.Parse(r.Reference)
+}
+
+// String implements `fmt.Stringer` and returns the reference string.
+// The resulting string is meaningful only if the reference is valid.
+func (r Reference) String() string {
+	if r.Repository == "" {
+		return r.Registry
+	}
+	ref := r.Registry + "/" + r.Repository
+	if r.Reference == "" {
+		return ref
+	}
+	if d, err := r.Digest(); err == nil {
+		return ref + "@" + d.String()
+	}
+	return ref + ":" + r.Reference
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/registry.go b/vendor/oras.land/oras-go/v2/registry/registry.go
new file mode 100644
index 000000000000..e1da0ab9c729
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/registry.go
@@ -0,0 +1,52 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package registry provides high-level operations to manage registries.
+package registry
+
+import "context"
+
+// Registry represents a collection of repositories.
+type Registry interface {
+	// Repositories lists the names of repositories available in the registry.
+	// Since the returned repositories may be paginated by the underlying
+	// implementation, a function should be passed in to process the paginated
+	// repository list.
+	// The `last` argument is the `last` parameter when invoking the catalog
+	// API. If `last` is NOT empty, the entries in the response start after the
+	// repo specified by `last`. Otherwise, the response starts from the top
+	// of the Repositories list.
+	// Note: When implemented by a remote registry, the catalog API is called.
+	// However, not all registries support pagination or conform to the
+	// specification.
+	// Reference: https://docs.docker.com/registry/spec/api/#catalog
+	// See also `Repositories()` in this package.
+	Repositories(ctx context.Context, last string, fn func(repos []string) error) error
+
+	// Repository returns a repository reference by the given name.
+	Repository(ctx context.Context, name string) (Repository, error)
+}
+
+// Repositories lists the names of repositories available in the registry.
+func Repositories(ctx context.Context, reg Registry) ([]string, error) {
+	var res []string
+	if err := reg.Repositories(ctx, "", func(repos []string) error {
+		res = append(res, repos...)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/remote/auth/cache.go b/vendor/oras.land/oras-go/v2/registry/remote/auth/cache.go
new file mode 100644
index 000000000000..d11c092bf575
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/remote/auth/cache.go
@@ -0,0 +1,232 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	"oras.land/oras-go/v2/errdef"
+	"oras.land/oras-go/v2/internal/syncutil"
+)
+
+// DefaultCache is the sharable cache used by DefaultClient.
+var DefaultCache Cache = NewCache()
+
+// Cache caches the auth-scheme and auth-token for the "Authorization" header in
+// accessing the remote registry.
+// Precisely, the header is `Authorization: auth-scheme auth-token`.
+// The `auth-token` is a generic term as `token68` in RFC 7235 section 2.1.
+type Cache interface {
+	// GetScheme returns the auth-scheme part cached for the given registry.
+	// A single registry is assumed to have a consistent scheme.
+	// If a registry has different schemes per path, the auth client is still
+	// workable. However, the cache may not be effective as the cache cannot
+	// correctly guess the scheme.
+	GetScheme(ctx context.Context, registry string) (Scheme, error)
+
+	// GetToken returns the auth-token part cached for the given registry of a
+	// given scheme.
+	// The underlying implementation MAY cache the token for all schemes for the
+	// given registry.
+	GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error)
+
+	// Set fetches the token using the given fetch function and caches the token
+	// for the given scheme with the given key for the given registry.
+	// The return values of the fetch function are returned by this function.
+	// The underlying implementation MAY combine concurrent fetch operations if
+	// the Set function is invoked multiple times at the same time.
+	Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error)
+}
+
+// cacheEntry is a cache entry for a single registry.
+type cacheEntry struct {
+	scheme Scheme
+	tokens sync.Map // map[string]string
+}
+
+// concurrentCache is a cache suitable for concurrent invocation.
+type concurrentCache struct {
+	status sync.Map // map[string]*syncutil.Once
+	cache  sync.Map // map[string]*cacheEntry
+}
+
+// NewCache creates a new goroutine-safe cache instance.
+func NewCache() Cache {
+	return &concurrentCache{}
+}
+
+// GetScheme returns the auth-scheme part cached for the given registry.
+func (cc *concurrentCache) GetScheme(ctx context.Context, registry string) (Scheme, error) {
+	entry, ok := cc.cache.Load(registry)
+	if !ok {
+		return SchemeUnknown, errdef.ErrNotFound
+	}
+	return entry.(*cacheEntry).scheme, nil
+}
+
+// GetToken returns the auth-token part cached for the given registry of a given
+// scheme.
+func (cc *concurrentCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
+	entryValue, ok := cc.cache.Load(registry)
+	if !ok {
+		return "", errdef.ErrNotFound
+	}
+	entry := entryValue.(*cacheEntry)
+	if entry.scheme != scheme {
+		return "", errdef.ErrNotFound
+	}
+	if token, ok := entry.tokens.Load(key); ok {
+		return token.(string), nil
+	}
+	return "", errdef.ErrNotFound
+}
+
+// Set fetches the token using the given fetch function and caches the token
+// for the given scheme with the given key for the given registry.
+// Set combines concurrent fetch operations if it is invoked multiple times at
+// the same time.
+func (cc *concurrentCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
+	// fetch token
+	statusKey := strings.Join([]string{
+		registry,
+		scheme.String(),
+		key,
+	}, " ")
+	statusValue, _ := cc.status.LoadOrStore(statusKey, syncutil.NewOnce())
+	fetchOnce := statusValue.(*syncutil.Once)
+	fetchedFirst, result, err := fetchOnce.Do(ctx, func() (interface{}, error) {
+		return fetch(ctx)
+	})
+	if fetchedFirst {
+		cc.status.Delete(statusKey)
+	}
+	if err != nil {
+		return "", err
+	}
+	token := result.(string)
+	if !fetchedFirst {
+		return token, nil
+	}
+
+	// cache token
+	newEntry := &cacheEntry{
+		scheme: scheme,
+	}
+	entryValue, exists := cc.cache.LoadOrStore(registry, newEntry)
+	entry := entryValue.(*cacheEntry)
+	if exists && entry.scheme != scheme {
+		// there is a scheme change, which is not expected in most scenarios.
+		// force invalidating all previous cache.
+		entry = newEntry
+		cc.cache.Store(registry, entry)
+	}
+	entry.tokens.Store(key, token)
+
+	return token, nil
+}
+
+// noCache is a cache implementation that does no caching at all.
+type noCache struct{}
+
+// GetScheme always returns not found error as it has no cache.
+func (noCache) GetScheme(ctx context.Context, registry string) (Scheme, error) {
+	return SchemeUnknown, errdef.ErrNotFound
+}
+
+// GetToken always returns not found error as it has no cache.
+func (noCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
+	return "", errdef.ErrNotFound
+}
+
+// Set calls fetch directly without caching.
+func (noCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
+	return fetch(ctx)
+}
+
+// hostCache is an auth cache that ignores scopes. It uses only the registry's
+// hostname to find a token.
+type hostCache struct {
+	Cache
+}
+
+// GetToken implements Cache.
+func (c *hostCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
+	return c.Cache.GetToken(ctx, registry, scheme, "")
+}
+
+// Set implements Cache.
+func (c *hostCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
+	return c.Cache.Set(ctx, registry, scheme, "", fetch)
+}
+
+// fallbackCache tries the primary cache then falls back to the secondary cache.
+type fallbackCache struct {
+	primary   Cache
+	secondary Cache
+}
+
+// GetScheme implements Cache.
+func (fc *fallbackCache) GetScheme(ctx context.Context, registry string) (Scheme, error) {
+	scheme, err := fc.primary.GetScheme(ctx, registry)
+	if err == nil {
+		return scheme, nil
+	}
+
+	// fallback
+	return fc.secondary.GetScheme(ctx, registry)
+}
+
+// GetToken implements Cache.
+func (fc *fallbackCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
+	token, err := fc.primary.GetToken(ctx, registry, scheme, key)
+	if err == nil {
+		return token, nil
+	}
+
+	// fallback
+	return fc.secondary.GetToken(ctx, registry, scheme, key)
+}
+
+// Set implements Cache.
+func (fc *fallbackCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
+	token, err := fc.primary.Set(ctx, registry, scheme, key, fetch)
+	if err != nil {
+		return "", err
+	}
+
+	return fc.secondary.Set(ctx, registry, scheme, key, func(ctx context.Context) (string, error) {
+		return token, nil
+	})
+}
+
+// NewSingleContextCache creates a host-based cache for optimizing the auth flow
+// for non-compliant registries.
+// It is intended to be used in a single context, such as pulling from a single
+// repository.
+// This cache should not be shared.
+//
+// Note: [NewCache] should be used for compliant registries as it can be shared
+// across contexts and will generally make fewer re-authentication requests.
+func NewSingleContextCache() Cache {
+	cache := NewCache()
+	return &fallbackCache{
+		primary: cache,
+		// We can re-use the same concurrentCache here because the key space is
+		// different (keys are always empty for the hostCache) so there is no
+		// collision. Even if there is a collision it is not an issue.
+		// Re-using saves a little memory.
+		secondary: &hostCache{cache},
+	}
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/remote/auth/challenge.go b/vendor/oras.land/oras-go/v2/registry/remote/auth/challenge.go
new file mode 100644
index 000000000000..58bdefda9cd1
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/remote/auth/challenge.go
@@ -0,0 +1,167 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+	"strconv"
+	"strings"
+)
+
+// Scheme defines the authentication method.
+type Scheme byte
+
+const (
+	// SchemeUnknown represents unknown or unsupported schemes.
+	SchemeUnknown Scheme = iota
+
+	// SchemeBasic represents the "Basic" HTTP authentication scheme.
+	// Reference: https://tools.ietf.org/html/rfc7617
+	SchemeBasic
+
+	// SchemeBearer represents the Bearer token in OAuth 2.0.
+	// Reference: https://tools.ietf.org/html/rfc6750
+	SchemeBearer
+)
+
+// parseScheme parses the authentication scheme from the given string
+// case-insensitively.
+func parseScheme(scheme string) Scheme {
+	switch {
+	case strings.EqualFold(scheme, "basic"):
+		return SchemeBasic
+	case strings.EqualFold(scheme, "bearer"):
+		return SchemeBearer
+	}
+	return SchemeUnknown
+}
+
+// String returns the string for the scheme.
+func (s Scheme) String() string {
+	switch s {
+	case SchemeBasic:
+		return "Basic"
+	case SchemeBearer:
+		return "Bearer"
+	}
+	return "Unknown"
+}
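
For illustration only, and not part of the vendored code: a hypothetical challenge header and the values the parser below would extract from it. The realm, service and scope are placeholders.

// Www-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:hello-world:pull"
//
// parseChallenge would return:
//   scheme = SchemeBearer
//   params = map[string]string{
//       "realm":   "https://auth.example.com/token",
//       "service": "registry.example.com",
//       "scope":   "repository:hello-world:pull",
//   }

+// parseChallenge parses the "WWW-Authenticate" header returned by the remote
+// registry, and extracts parameters if scheme is Bearer.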
+// References:
+// - https://docs.docker.com/registry/spec/auth/token/#how-to-authenticate
+// - https://tools.ietf.org/html/rfc7235#section-2.1
+func parseChallenge(header string) (scheme Scheme, params map[string]string) {
+	// as defined in RFC 7235 section 2.1, we have
+	//	challenge   = auth-scheme [ 1*SP ( token68 / #auth-param ) ]
+	//	auth-scheme = token
+	//	auth-param  = token BWS "=" BWS ( token / quoted-string )
+	//
+	// since we focus parameters only on Bearer, we have
+	//	challenge = auth-scheme [ 1*SP #auth-param ]
+	schemeString, rest := parseToken(header)
+	scheme = parseScheme(schemeString)
+
+	// fast path for non-bearer challenge
+	if scheme != SchemeBearer {
+		return
+	}
+
+	// parse params for bearer auth.
+	// combining RFC 7235 section 2.1 with RFC 7230 section 7, we have
+	//	#auth-param => auth-param *( OWS "," OWS auth-param )
+	var key, value string
+	for {
+		key, rest = parseToken(skipSpace(rest))
+		if key == "" {
+			return
+		}
+
+		rest = skipSpace(rest)
+		if rest == "" || rest[0] != '=' {
+			return
+		}
+		rest = skipSpace(rest[1:])
+		if rest == "" {
+			return
+		}
+
+		if rest[0] == '"' {
+			prefix, err := strconv.QuotedPrefix(rest)
+			if err != nil {
+				return
+			}
+			value, err = strconv.Unquote(prefix)
+			if err != nil {
+				return
+			}
+			rest = rest[len(prefix):]
+		} else {
+			value, rest = parseToken(rest)
+			if value == "" {
+				return
+			}
+		}
+		if params == nil {
+			params = map[string]string{
+				key: value,
+			}
+		} else {
+			params[key] = value
+		}
+
+		rest = skipSpace(rest)
+		if rest == "" || rest[0] != ',' {
+			return
+		}
+		rest = rest[1:]
+	}
+}
+
+// isNotTokenChar reports whether rune is not a `tchar` defined in RFC 7230
+// section 3.2.6.
+func isNotTokenChar(r rune) bool {
+	// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+	//       / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+	//       / DIGIT / ALPHA
+	//       ; any VCHAR, except delimiters
+	return (r < 'A' || r > 'Z') && (r < 'a' || r > 'z') &&
+		(r < '0' || r > '9') && !strings.ContainsRune("!#$%&'*+-.^_`|~", r)
+}
+
+// parseToken finds the next token from the given string. If no token is found,
+// an empty token is returned and the whole of the input is returned in rest.
+// Note: Since token = 1*tchar, an empty string is not a valid token.
+func parseToken(s string) (token, rest string) {
+	if i := strings.IndexFunc(s, isNotTokenChar); i != -1 {
+		return s[:i], s[i:]
+	}
+	return s, ""
+}
+
+// skipSpace skips "bad" whitespace (BWS) defined in RFC 7230 section 3.2.3.
+func skipSpace(s string) string {
+	// OWS = *( SP / HTAB )
+	//     ; optional whitespace
+	// BWS = OWS
+	//     ; "bad" whitespace
+	if i := strings.IndexFunc(s, func(r rune) bool {
+		return r != ' ' && r != '\t'
+	}); i != -1 {
+		return s[i:]
+	}
+	return s
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/remote/auth/client.go b/vendor/oras.land/oras-go/v2/registry/remote/auth/client.go
new file mode 100644
index 000000000000..8d9685a21356
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/remote/auth/client.go
@@ -0,0 +1,430 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package auth provides authentication for a client to a remote registry.
+package auth
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"oras.land/oras-go/v2/registry/remote/internal/errutil"
+	"oras.land/oras-go/v2/registry/remote/retry"
+)
+
+// ErrBasicCredentialNotFound is returned when the credential is not found for
+// basic auth.
+var ErrBasicCredentialNotFound = errors.New("basic credential not found")
+
+// DefaultClient is the default auth-decorated client.
+var DefaultClient = &Client{
+	Client: retry.DefaultClient,
+	Header: http.Header{
+		"User-Agent": {"oras-go"},
+	},
+	Cache: DefaultCache,
+}
+
+// maxResponseBytes specifies the default limit on how many response bytes are
+// allowed in the server's response from authorization service servers.
+// A typical response message from an authorization service is around 1 to
+// 4 KiB. The size of a token must be smaller than the HTTP header size limit,
+// which is usually 16 KiB. As specified by the distribution spec, the response
+// may contain 2 identical tokens, that is, 16 x 2 = 32 KiB.
+// Hence, 128 KiB should be sufficient.
+// Reference: https://docs.docker.com/registry/spec/auth/token/
+var maxResponseBytes int64 = 128 * 1024 // 128 KiB
+
+// defaultClientID specifies the default client ID used in OAuth2.
+// See also ClientID.
+var defaultClientID = "oras-go"
+
+// CredentialFunc represents a function that resolves the credential for the
+// given registry (i.e. host:port).
+//
+// [EmptyCredential] is a valid return value and should not be considered as
+// an error.
+type CredentialFunc func(ctx context.Context, hostport string) (Credential, error)
+
+// StaticCredential specifies static credentials for the given host.
+func StaticCredential(registry string, cred Credential) CredentialFunc {
+	if registry == "docker.io" {
+		// it is expected that traffic targeting "docker.io" will be redirected
+		// to "registry-1.docker.io"
+		// reference: https://github.com/moby/moby/blob/v24.0.0-beta.2/registry/config.go#L25-L48
+		registry = "registry-1.docker.io"
+	}
+	return func(_ context.Context, hostport string) (Credential, error) {
+		if hostport == registry {
+			return cred, nil
+		}
+		return EmptyCredential, nil
+	}
+}
+
+// Client is an auth-decorated HTTP client.
+// Its zero value is a usable client that uses http.DefaultClient with no cache.
+type Client struct {
+	// Client is the underlying HTTP client used to access the remote
+	// server.
+	// If nil, http.DefaultClient is used.
+	// It is possible to use the default retry client from the package
+	// `oras.land/oras-go/v2/registry/remote/retry`. That client is already
+	// available in the DefaultClient.
+	// It is also possible to use a custom client. For example,
+	// github.com/hashicorp/go-retryablehttp is a popular HTTP client that
+	// supports retries.
+	Client *http.Client
+
+	// Header contains the custom headers to be added to each request.
+	Header http.Header
+
+	// Credential specifies the function for resolving the credential for the
+	// given registry (i.e. host:port).
+	// EmptyCredential is a valid return value and should not be considered as
+	// an error.
+	// If nil, the credential is always resolved to EmptyCredential.
+	Credential CredentialFunc
+
+	// Cache caches credentials for directly accessing the remote registry.
+	// If nil, no cache is used.
+	Cache Cache
+
+	// ClientID used in fetching OAuth2 token as a required field.
+	// If empty, a default client ID is used.
+	// Reference: https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
+	ClientID string
+
+	// ForceAttemptOAuth2 controls whether to follow OAuth2 with password grant
+	// instead of the distribution spec when authenticating using username and
+	// password.
+	// References:
+	// - https://docs.docker.com/registry/spec/auth/jwt/
+	// - https://docs.docker.com/registry/spec/auth/oauth/
+	ForceAttemptOAuth2 bool
+}
+
+// client returns an HTTP client used to access the remote registry.
+// http.DefaultClient is returned if the client is not configured.
+func (c *Client) client() *http.Client {
+	if c.Client == nil {
+		return http.DefaultClient
+	}
+	return c.Client
+}
+
+// send adds headers to the request and sends the request to the remote server.
+func (c *Client) send(req *http.Request) (*http.Response, error) {
+	for key, values := range c.Header {
+		req.Header[key] = append(req.Header[key], values...)
+	}
+	return c.client().Do(req)
+}
+
+// credential resolves the credential for the given registry.
+func (c *Client) credential(ctx context.Context, reg string) (Credential, error) {
+	if c.Credential == nil {
+		return EmptyCredential, nil
+	}
+	return c.Credential(ctx, reg)
+}
+
+// cache resolves the cache.
+// noCache is returned if the cache is not configured.
+func (c *Client) cache() Cache {
+	if c.Cache == nil {
+		return noCache{}
+	}
+	return c.Cache
+}
+
+// SetUserAgent sets the user agent for all out-going requests.
+func (c *Client) SetUserAgent(userAgent string) {
+	if c.Header == nil {
+		c.Header = http.Header{}
+	}
+	c.Header.Set("User-Agent", userAgent)
+}
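
For illustration only, and not part of the vendored code: wiring up a Client with a static credential. The registry host and credential values are placeholders.

package main

import (
	"oras.land/oras-go/v2/registry/remote/auth"
	"oras.land/oras-go/v2/registry/remote/retry"
)

func main() {
	client := &auth.Client{
		Client: retry.DefaultClient,
		Cache:  auth.NewCache(),
		Credential: auth.StaticCredential("registry.example.com", auth.Credential{
			Username: "user",
			Password: "secret",
		}),
	}
	client.SetUserAgent("example-app/0.1")
	_ = client // client.Do(req) resolves Basic or Bearer challenges and caches tokens
}

+// Do sends the request to the remote server, attempting to resolve
+// authentication if the 'Authorization' header is not set.
+//
+// On authentication failure due to a bad credential,
+// - Do returns an error if it fails to fetch a token for bearer auth.
+// - Do returns the registry response without error for basic auth.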
+func (c *Client) Do(originalReq *http.Request) (*http.Response, error) { + if auth := originalReq.Header.Get("Authorization"); auth != "" { + return c.send(originalReq) + } + + ctx := originalReq.Context() + req := originalReq.Clone(ctx) + + // attempt cached auth token + var attemptedKey string + cache := c.cache() + host := originalReq.Host + scheme, err := cache.GetScheme(ctx, host) + if err == nil { + switch scheme { + case SchemeBasic: + token, err := cache.GetToken(ctx, host, SchemeBasic, "") + if err == nil { + req.Header.Set("Authorization", "Basic "+token) + } + case SchemeBearer: + scopes := GetAllScopesForHost(ctx, host) + attemptedKey = strings.Join(scopes, " ") + token, err := cache.GetToken(ctx, host, SchemeBearer, attemptedKey) + if err == nil { + req.Header.Set("Authorization", "Bearer "+token) + } + } + } + + resp, err := c.send(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusUnauthorized { + return resp, nil + } + + // attempt again with credentials for recognized schemes + challenge := resp.Header.Get("Www-Authenticate") + scheme, params := parseChallenge(challenge) + switch scheme { + case SchemeBasic: + resp.Body.Close() + + token, err := cache.Set(ctx, host, SchemeBasic, "", func(ctx context.Context) (string, error) { + return c.fetchBasicAuth(ctx, host) + }) + if err != nil { + return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) + } + + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Basic "+token) + case SchemeBearer: + resp.Body.Close() + + scopes := GetAllScopesForHost(ctx, host) + if paramScope := params["scope"]; paramScope != "" { + // merge hinted scopes with challenged scopes + scopes = append(scopes, strings.Split(paramScope, " ")...) + scopes = CleanScopes(scopes) + } + key := strings.Join(scopes, " ") + + // attempt the cache again if there is a scope change + if key != attemptedKey { + if token, err := cache.GetToken(ctx, host, SchemeBearer, key); err == nil { + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Bearer "+token) + if err := rewindRequestBody(req); err != nil { + return nil, err + } + + resp, err := c.send(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusUnauthorized { + return resp, nil + } + resp.Body.Close() + } + } + + // attempt with credentials + realm := params["realm"] + service := params["service"] + token, err := cache.Set(ctx, host, SchemeBearer, key, func(ctx context.Context) (string, error) { + return c.fetchBearerToken(ctx, host, realm, service, scopes) + }) + if err != nil { + return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) + } + + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Bearer "+token) + default: + return resp, nil + } + if err := rewindRequestBody(req); err != nil { + return nil, err + } + + return c.send(req) +} + +// fetchBasicAuth fetches a basic auth token for the basic challenge. 
+func (c *Client) fetchBasicAuth(ctx context.Context, registry string) (string, error) {
+	cred, err := c.credential(ctx, registry)
+	if err != nil {
+		return "", fmt.Errorf("failed to resolve credential: %w", err)
+	}
+	if cred == EmptyCredential {
+		return "", ErrBasicCredentialNotFound
+	}
+	if cred.Username == "" || cred.Password == "" {
+		return "", errors.New("missing username or password for basic auth")
+	}
+	auth := cred.Username + ":" + cred.Password
+	return base64.StdEncoding.EncodeToString([]byte(auth)), nil
+}
+
+// fetchBearerToken fetches an access token for the bearer challenge.
+func (c *Client) fetchBearerToken(ctx context.Context, registry, realm, service string, scopes []string) (string, error) {
+	cred, err := c.credential(ctx, registry)
+	if err != nil {
+		return "", err
+	}
+	if cred.AccessToken != "" {
+		return cred.AccessToken, nil
+	}
+	if cred == EmptyCredential || (cred.RefreshToken == "" && !c.ForceAttemptOAuth2) {
+		return c.fetchDistributionToken(ctx, realm, service, scopes, cred.Username, cred.Password)
+	}
+	return c.fetchOAuth2Token(ctx, realm, service, scopes, cred)
+}
+
+// fetchDistributionToken fetches an access token as defined by the distribution
+// specification.
+// It fetches anonymous tokens if no credential is provided.
+// References:
+// - https://docs.docker.com/registry/spec/auth/jwt/
+// - https://docs.docker.com/registry/spec/auth/token/
+func (c *Client) fetchDistributionToken(ctx context.Context, realm, service string, scopes []string, username, password string) (string, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
+	if err != nil {
+		return "", err
+	}
+	if username != "" || password != "" {
+		req.SetBasicAuth(username, password)
+	}
+	q := req.URL.Query()
+	if service != "" {
+		q.Set("service", service)
+	}
+	for _, scope := range scopes {
+		q.Add("scope", scope)
+	}
+	req.URL.RawQuery = q.Encode()
+
+	resp, err := c.send(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", errutil.ParseErrorResponse(resp)
+	}
+
+	// As specified in https://docs.docker.com/registry/spec/auth/token/ section
+	// "Token Response Fields", the token is either in `token` or
+	// `access_token`. If both are present, they are identical.
+	var result struct {
+		Token       string `json:"token"`
+		AccessToken string `json:"access_token"`
+	}
+	lr := io.LimitReader(resp.Body, maxResponseBytes)
+	if err := json.NewDecoder(lr).Decode(&result); err != nil {
+		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
+	}
+	if result.AccessToken != "" {
+		return result.AccessToken, nil
+	}
+	if result.Token != "" {
+		return result.Token, nil
+	}
+	return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL)
+}
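
For illustration only, and not part of the vendored code: the shape of the token request fetchDistributionToken would issue for a hypothetical realm, service and scope (q.Encode sorts query parameters by key).

// GET https://auth.example.com/token?scope=repository%3Ahello-world%3Apull&service=registry.example.com
//
// The response body is expected to carry the token in either field:
//   {"token": "...", "access_token": "..."}

+// fetchOAuth2Token fetches an OAuth2 access token.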
+// Reference: https://docs.docker.com/registry/spec/auth/oauth/
+func (c *Client) fetchOAuth2Token(ctx context.Context, realm, service string, scopes []string, cred Credential) (string, error) {
+	form := url.Values{}
+	if cred.RefreshToken != "" {
+		form.Set("grant_type", "refresh_token")
+		form.Set("refresh_token", cred.RefreshToken)
+	} else if cred.Username != "" && cred.Password != "" {
+		form.Set("grant_type", "password")
+		form.Set("username", cred.Username)
+		form.Set("password", cred.Password)
+	} else {
+		return "", errors.New("missing username or password for bearer auth")
+	}
+	form.Set("service", service)
+	clientID := c.ClientID
+	if clientID == "" {
+		clientID = defaultClientID
+	}
+	form.Set("client_id", clientID)
+	if len(scopes) != 0 {
+		form.Set("scope", strings.Join(scopes, " "))
+	}
+	body := strings.NewReader(form.Encode())
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, body)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := c.send(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", errutil.ParseErrorResponse(resp)
+	}
+
+	var result struct {
+		AccessToken string `json:"access_token"`
+	}
+	lr := io.LimitReader(resp.Body, maxResponseBytes)
+	if err := json.NewDecoder(lr).Decode(&result); err != nil {
+		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
+	}
+	if result.AccessToken != "" {
+		return result.AccessToken, nil
+	}
+	return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL)
+}
+
+// rewindRequestBody tries to rewind the request body if it exists.
+func rewindRequestBody(req *http.Request) error {
+	if req.Body == nil || req.Body == http.NoBody {
+		return nil
+	}
+	if req.GetBody == nil {
+		return fmt.Errorf("%s %q: request body is not rewindable", req.Method, req.URL)
+	}
+	body, err := req.GetBody()
+	if err != nil {
+		return fmt.Errorf("%s %q: failed to get request body: %w", req.Method, req.URL, err)
+	}
+	req.Body = body
+	return nil
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/remote/auth/credential.go b/vendor/oras.land/oras-go/v2/registry/remote/auth/credential.go
new file mode 100644
index 000000000000..013305f711bd
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/remote/auth/credential.go
@@ -0,0 +1,40 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+// EmptyCredential represents an empty credential.
+var EmptyCredential Credential
+
+// Credential contains authentication credentials used to access remote
+// registries.
+type Credential struct {
+	// Username is the name of the user for the remote registry.
+	Username string
+
+	// Password is the secret associated with the username.
+	Password string
+
+	// RefreshToken is a bearer token to be sent to the authorization service
+	// for fetching access tokens.
+	// A refresh token is often referred to as an identity token.
+	// Reference: https://docs.docker.com/registry/spec/auth/oauth/
+	RefreshToken string
+
+	// AccessToken is a bearer token to be sent to the registry.
+	// An access token is often referred to as a registry token.
+	// Reference: https://docs.docker.com/registry/spec/auth/token/
+	AccessToken string
+}
diff --git a/vendor/oras.land/oras-go/v2/registry/remote/auth/scope.go b/vendor/oras.land/oras-go/v2/registry/remote/auth/scope.go
new file mode 100644
index 000000000000..d81cc0d4f561
--- /dev/null
+++ b/vendor/oras.land/oras-go/v2/registry/remote/auth/scope.go
@@ -0,0 +1,325 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+	"context"
+	"slices"
+	"strings"
+
+	"oras.land/oras-go/v2/registry"
+)
+
+// Actions used in scopes.
+// Reference: https://docs.docker.com/registry/spec/auth/scope/
+const (
+	// ActionPull represents generic read access for resources of the repository
+	// type.
+	ActionPull = "pull"
+
+	// ActionPush represents generic write access for resources of the
+	// repository type.
+	ActionPush = "push"
+
+	// ActionDelete represents the delete permission for resources of the
+	// repository type.
+	ActionDelete = "delete"
+)
+
+// ScopeRegistryCatalog is the scope for registry catalog access.
+const ScopeRegistryCatalog = "registry:catalog:*"
+
+// ScopeRepository returns a repository scope with given actions.
+// Reference: https://docs.docker.com/registry/spec/auth/scope/
+func ScopeRepository(repository string, actions ...string) string {
+	actions = cleanActions(actions)
+	if repository == "" || len(actions) == 0 {
+		return ""
+	}
+	return strings.Join([]string{
+		"repository",
+		repository,
+		strings.Join(actions, ","),
+	}, ":")
+}
+
+// AppendRepositoryScope returns a new context containing scope hints for the
+// auth client to fetch bearer tokens with the given actions on the repository.
+// If called multiple times, the new scopes will be appended to the existing
+// scopes. The resulting scopes are de-duplicated.
+//
+// For example, uploading blob to the repository "hello-world" does HEAD request
+// first then POST and PUT. The HEAD request will return a challenge for scope
+// `repository:hello-world:pull`, and the auth client will fetch a token for
+// that challenge. Later, the POST request will return a challenge for scope
+// `repository:hello-world:push`, and the auth client will fetch a token for
+// that challenge again. By invoking AppendRepositoryScope with the actions
+// [ActionPull] and [ActionPush] for the repository `hello-world`,
+// the auth client with cache is hinted to fetch a token via a single token
+// fetch request for all the HEAD, POST, PUT requests.
+func AppendRepositoryScope(ctx context.Context, ref registry.Reference, actions ...string) context.Context {
+	if len(actions) == 0 {
+		return ctx
+	}
+	scope := ScopeRepository(ref.Repository, actions...)
+ return AppendScopesForHost(ctx, ref.Host(), scope) +} + +// scopesContextKey is the context key for scopes. +type scopesContextKey struct{} + +// WithScopes returns a context with scopes added. Scopes are de-duplicated. +// Scopes are used as hints for the auth client to fetch bearer tokens with +// larger scopes. +// +// For example, uploading a blob to the repository "hello-world" does a HEAD request +// first, then POST and PUT. The HEAD request will return a challenge for scope +// `repository:hello-world:pull`, and the auth client will fetch a token for +// that challenge. Later, the POST request will return a challenge for scope +// `repository:hello-world:push`, and the auth client will fetch a token for +// that challenge again. By invoking WithScopes with the scope +// `repository:hello-world:pull,push`, the auth client with cache is hinted to +// fetch a token via a single token fetch request for all the HEAD, POST, and PUT +// requests. +// +// Passing an empty list of scopes will virtually remove the scope hints in the +// context. +// +// Reference: https://docs.docker.com/registry/spec/auth/scope/ +func WithScopes(ctx context.Context, scopes ...string) context.Context { + scopes = CleanScopes(scopes) + return context.WithValue(ctx, scopesContextKey{}, scopes) +} + +// AppendScopes appends additional scopes to the existing scopes in the context +// and returns a new context. The resulting scopes are de-duplicated. +// The append operation does not modify the existing scopes in the context passed in. +func AppendScopes(ctx context.Context, scopes ...string) context.Context { + if len(scopes) == 0 { + return ctx + } + return WithScopes(ctx, append(GetScopes(ctx), scopes...)...) +} + +// GetScopes returns the scopes in the context. +func GetScopes(ctx context.Context) []string { + if scopes, ok := ctx.Value(scopesContextKey{}).([]string); ok { + return slices.Clone(scopes) + } + return nil +} + +// scopesForHostContextKey is the context key for per-host scopes. +type scopesForHostContextKey string + +// WithScopesForHost returns a context with per-host scopes added. +// Scopes are de-duplicated. +// Scopes are used as hints for the auth client to fetch bearer tokens with +// larger scopes. +// +// For example, uploading a blob to the repository "hello-world" does a HEAD request +// first, then POST and PUT. The HEAD request will return a challenge for scope +// `repository:hello-world:pull`, and the auth client will fetch a token for +// that challenge. Later, the POST request will return a challenge for scope +// `repository:hello-world:push`, and the auth client will fetch a token for +// that challenge again. By invoking WithScopesForHost with the scope +// `repository:hello-world:pull,push`, the auth client with cache is hinted to +// fetch a token via a single token fetch request for all the HEAD, POST, and PUT +// requests. +// +// Passing an empty list of scopes will virtually remove the scope hints in the +// context for the given host. +// +// Reference: https://docs.docker.com/registry/spec/auth/scope/ +func WithScopesForHost(ctx context.Context, host string, scopes ...string) context.Context { + scopes = CleanScopes(scopes) + return context.WithValue(ctx, scopesForHostContextKey(host), scopes) +} + +// AppendScopesForHost appends additional scopes to the existing scopes +// in the context for the given host and returns a new context. +// The resulting scopes are de-duplicated. +// The append operation does not modify the existing scopes in the context passed in.
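+// +// For example (illustrative), appending "repository:hello-world:push" for a host +// whose context scopes already include "repository:hello-world:pull" yields the +// single merged scope "repository:hello-world:pull,push": +// +// ctx = AppendScopesForHost(ctx, "registry.example.com", "repository:hello-world:push")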
+func AppendScopesForHost(ctx context.Context, host string, scopes ...string) context.Context { + if len(scopes) == 0 { + return ctx + } + oldScopes := GetScopesForHost(ctx, host) + return WithScopesForHost(ctx, host, append(oldScopes, scopes...)...) +} + +// GetScopesForHost returns the scopes in the context for the given host, +// excluding global scopes added by [WithScopes] and [AppendScopes]. +func GetScopesForHost(ctx context.Context, host string) []string { + if scopes, ok := ctx.Value(scopesForHostContextKey(host)).([]string); ok { + return slices.Clone(scopes) + } + return nil +} + +// GetAllScopesForHost returns the scopes in the context for the given host, +// including global scopes added by [WithScopes] and [AppendScopes]. +func GetAllScopesForHost(ctx context.Context, host string) []string { + scopes := GetScopesForHost(ctx, host) + globalScopes := GetScopes(ctx) + + if len(scopes) == 0 { + return globalScopes + } + if len(globalScopes) == 0 { + return scopes + } + // re-clean the scopes + allScopes := append(scopes, globalScopes...) + return CleanScopes(allScopes) +} + +// CleanScopes merges and sorts the actions in ascending order if the scopes have +// the same resource type and name. The final scopes are sorted in ascending +// order. In other words, the scopes passed in are de-duplicated and sorted. +// Therefore, the output of this function is deterministic. +// +// If there is a wildcard `*` in the actions, other actions for the same resource +// type and name are ignored. +func CleanScopes(scopes []string) []string { + // fast paths + switch len(scopes) { + case 0: + return nil + case 1: + scope := scopes[0] + i := strings.LastIndex(scope, ":") + if i == -1 { + return []string{scope} + } + actionList := strings.Split(scope[i+1:], ",") + actionList = cleanActions(actionList) + if len(actionList) == 0 { + return nil + } + actions := strings.Join(actionList, ",") + scope = scope[:i+1] + actions + return []string{scope} + } + + // slow path + var result []string + + // merge recognizable scopes + resourceTypes := make(map[string]map[string]map[string]struct{}) + for _, scope := range scopes { + // extract resource type + i := strings.Index(scope, ":") + if i == -1 { + result = append(result, scope) + continue + } + resourceType := scope[:i] + + // extract resource name and actions + rest := scope[i+1:] + i = strings.LastIndex(rest, ":") + if i == -1 { + result = append(result, scope) + continue + } + resourceName := rest[:i] + actions := rest[i+1:] + if actions == "" { + // drop scope since no action found + continue + } + + // add to the intermediate map for de-duplication + namedActions := resourceTypes[resourceType] + if namedActions == nil { + namedActions = make(map[string]map[string]struct{}) + resourceTypes[resourceType] = namedActions + } + actionSet := namedActions[resourceName] + if actionSet == nil { + actionSet = make(map[string]struct{}) + namedActions[resourceName] = actionSet + } + for _, action := range strings.Split(actions, ",") { + if action != "" { + actionSet[action] = struct{}{} + } + } + } + + // reconstruct scopes + for resourceType, namedActions := range resourceTypes { + for resourceName, actionSet := range namedActions { + if len(actionSet) == 0 { + continue + } + var actions []string + for action := range actionSet { + if action == "*" { + actions = []string{"*"} + break + } + actions = append(actions, action) + } + slices.Sort(actions) + scope := resourceType + ":" + resourceName + ":" + strings.Join(actions, ",") + result = append(result, scope)
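+ // e.g., result now holds "repository:hello-world:pull,push" (illustrative merged scope)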
+ } + } + + // sort and return + slices.Sort(result) + return result +} + +// cleanActions removes duplicated actions and sorts them in ascending order. +// If there is a wildcard `*` in the action, other actions are ignored. +func cleanActions(actions []string) []string { + // fast paths + switch len(actions) { + case 0: + return nil + case 1: + if actions[0] == "" { + return nil + } + return actions + } + + // slow path + slices.Sort(actions) + n := 0 + for i := 0; i < len(actions); i++ { + if actions[i] == "*" { + return []string{"*"} + } + if actions[i] != actions[n] { + n++ + if n != i { + actions[n] = actions[i] + } + } + } + n++ + if actions[0] == "" { + if n == 1 { + return nil + } + return actions[1:n] + } + return actions[:n] +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go new file mode 100644 index 000000000000..7664cc2ab0b8 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go @@ -0,0 +1,97 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "context" + "errors" + "fmt" + "strings" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/config" +) + +// FileStore implements a credentials store using the docker configuration file +// to keep the credentials in plain-text. +// +// Reference: https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +type FileStore struct { + // DisablePut disables putting credentials in plaintext. + // If DisablePut is set to true, Put() will return ErrPlaintextPutDisabled. + DisablePut bool + + config *config.Config +} + +var ( + // ErrPlaintextPutDisabled is returned by Put() when DisablePut is set + // to true. + ErrPlaintextPutDisabled = errors.New("putting plaintext credentials is disabled") + // ErrBadCredentialFormat is returned by Put() when the credential format + // is bad. + ErrBadCredentialFormat = errors.New("bad credential format") +) + +// NewFileStore creates a new file credentials store. +// +// Reference: https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +func NewFileStore(configPath string) (*FileStore, error) { + cfg, err := config.Load(configPath) + if err != nil { + return nil, err + } + return newFileStore(cfg), nil +} + +// newFileStore creates a file credentials store based on the given config instance. +func newFileStore(cfg *config.Config) *FileStore { + return &FileStore{config: cfg} +} + +// Get retrieves credentials from the store for the given server address. +func (fs *FileStore) Get(_ context.Context, serverAddress string) (auth.Credential, error) { + return fs.config.GetCredential(serverAddress) +} + +// Put saves credentials into the store for the given server address. +// Returns ErrPlaintextPutDisabled if fs.DisablePut is set to true.
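+// On success, the credential is written in plain text to the "auths" field of +// the config file, with username and password stored as base64("username:password").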
+func (fs *FileStore) Put(_ context.Context, serverAddress string, cred auth.Credential) error { + if fs.DisablePut { + return ErrPlaintextPutDisabled + } + if err := validateCredentialFormat(cred); err != nil { + return err + } + + return fs.config.PutCredential(serverAddress, cred) +} + +// Delete removes credentials from the store for the given server address. +func (fs *FileStore) Delete(_ context.Context, serverAddress string) error { + return fs.config.DeleteCredential(serverAddress) +} + +// validateCredentialFormat validates the format of cred. +func validateCredentialFormat(cred auth.Credential) error { + if strings.ContainsRune(cred.Username, ':') { + // Username and password will be encoded in the base64(username:password) + // format in the file. The decoded result will be wrong if username + // contains colon(s). + return fmt.Errorf("%w: colons(:) are not allowed in username", ErrBadCredentialFormat) + } + return nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go new file mode 100644 index 000000000000..20ee07437eee --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go @@ -0,0 +1,332 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil" +) + +const ( + // configFieldAuths is the "auths" field in the config file. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L19 + configFieldAuths = "auths" + // configFieldCredentialsStore is the "credsStore" field in the config file. + configFieldCredentialsStore = "credsStore" + // configFieldCredentialHelpers is the "credHelpers" field in the config file. + configFieldCredentialHelpers = "credHelpers" +) + +// ErrInvalidConfigFormat is returned when the config format is invalid. +var ErrInvalidConfigFormat = errors.New("invalid config format") + +// AuthConfig contains authorization information for connecting to a Registry. +// References: +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L45 +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/types/authconfig.go#L3-L22 +type AuthConfig struct { + // Auth is a base64-encoded string of "{username}:{password}". + Auth string `json:"auth,omitempty"` + // IdentityToken is used to authenticate the user and get an access token + // for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + // RegistryToken is a bearer token to be sent to a registry. 
+ RegistryToken string `json:"registrytoken,omitempty"` + + Username string `json:"username,omitempty"` // legacy field for compatibility + Password string `json:"password,omitempty"` // legacy field for compatibility +} + +// NewAuthConfig creates an AuthConfig based on cred. +func NewAuthConfig(cred auth.Credential) AuthConfig { + return AuthConfig{ + Auth: encodeAuth(cred.Username, cred.Password), + IdentityToken: cred.RefreshToken, + RegistryToken: cred.AccessToken, + } +} + +// Credential returns an auth.Credential based on ac. +func (ac AuthConfig) Credential() (auth.Credential, error) { + cred := auth.Credential{ + Username: ac.Username, + Password: ac.Password, + RefreshToken: ac.IdentityToken, + AccessToken: ac.RegistryToken, + } + if ac.Auth != "" { + var err error + // override username and password + cred.Username, cred.Password, err = decodeAuth(ac.Auth) + if err != nil { + return auth.EmptyCredential, fmt.Errorf("failed to decode auth field: %w: %v", ErrInvalidConfigFormat, err) + } + } + return cred, nil +} + +// Config represents a docker configuration file. +// References: +// - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L44 +type Config struct { + // path is the path to the config file. + path string + // rwLock is a read-write lock for the file store. + rwLock sync.RWMutex + // content is the content of the config file. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L44 + content map[string]json.RawMessage + // authsCache is a cache of the auths field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L19 + authsCache map[string]json.RawMessage + // credentialsStore is the credsStore field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L28 + credentialsStore string + // credentialHelpers is the credHelpers field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L29 + credentialHelpers map[string]string +} + +// Load loads Config from the given config path.
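+// An illustrative config file shape (all fields optional): +// +// {"auths": {"registry.example.com": {"auth": "<base64(username:password)>"}}, +// "credsStore": "<helper suffix>", "credHelpers": {"<registry>": "<helper suffix>"}}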
+func Load(configPath string) (*Config, error) { + cfg := &Config{path: configPath} + configFile, err := os.Open(configPath) + if err != nil { + if os.IsNotExist(err) { + // init content and caches if the content file does not exist + cfg.content = make(map[string]json.RawMessage) + cfg.authsCache = make(map[string]json.RawMessage) + return cfg, nil + } + return nil, fmt.Errorf("failed to open config file at %s: %w", configPath, err) + } + defer configFile.Close() + + // decode config content if the config file exists + if err := json.NewDecoder(configFile).Decode(&cfg.content); err != nil { + return nil, fmt.Errorf("failed to decode config file at %s: %w: %v", configPath, ErrInvalidConfigFormat, err) + } + + if credsStoreBytes, ok := cfg.content[configFieldCredentialsStore]; ok { + if err := json.Unmarshal(credsStoreBytes, &cfg.credentialsStore); err != nil { + return nil, fmt.Errorf("failed to unmarshal creds store field: %w: %v", ErrInvalidConfigFormat, err) + } + } + + if credHelpersBytes, ok := cfg.content[configFieldCredentialHelpers]; ok { + if err := json.Unmarshal(credHelpersBytes, &cfg.credentialHelpers); err != nil { + return nil, fmt.Errorf("failed to unmarshal cred helpers field: %w: %v", ErrInvalidConfigFormat, err) + } + } + + if authsBytes, ok := cfg.content[configFieldAuths]; ok { + if err := json.Unmarshal(authsBytes, &cfg.authsCache); err != nil { + return nil, fmt.Errorf("failed to unmarshal auths field: %w: %v", ErrInvalidConfigFormat, err) + } + } + if cfg.authsCache == nil { + cfg.authsCache = make(map[string]json.RawMessage) + } + + return cfg, nil +} + +// GetCredential returns an auth.Credential for serverAddress. +func (cfg *Config) GetCredential(serverAddress string) (auth.Credential, error) { + cfg.rwLock.RLock() + defer cfg.rwLock.RUnlock() + + authCfgBytes, ok := cfg.authsCache[serverAddress] + if !ok { + // NOTE: the auth key for the server address may have been stored with + // an http/https prefix in legacy config files, e.g. "registry.example.com" + // can be stored as "https://registry.example.com/". + var matched bool + for addr, auth := range cfg.authsCache { + if toHostname(addr) == serverAddress { + matched = true + authCfgBytes = auth + break + } + } + if !matched { + return auth.EmptyCredential, nil + } + } + var authCfg AuthConfig + if err := json.Unmarshal(authCfgBytes, &authCfg); err != nil { + return auth.EmptyCredential, fmt.Errorf("failed to unmarshal auth field: %w: %v", ErrInvalidConfigFormat, err) + } + return authCfg.Credential() +} + +// PutCredential saves cred for serverAddress. +func (cfg *Config) PutCredential(serverAddress string, cred auth.Credential) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + authCfg := NewAuthConfig(cred) + authCfgBytes, err := json.Marshal(authCfg) + if err != nil { + return fmt.Errorf("failed to marshal auth field: %w", err) + } + cfg.authsCache[serverAddress] = authCfgBytes + return cfg.saveFile() +} + +// DeleteCredential deletes the corresponding credential for serverAddress. +func (cfg *Config) DeleteCredential(serverAddress string) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + if _, ok := cfg.authsCache[serverAddress]; !ok { + // no-op + return nil + } + delete(cfg.authsCache, serverAddress) + return cfg.saveFile() +} + +// GetCredentialHelper returns the credential helper for serverAddress. +func (cfg *Config) GetCredentialHelper(serverAddress string) string { + return cfg.credentialHelpers[serverAddress] +} + +// CredentialsStore returns the configured credentials store.
+func (cfg *Config) CredentialsStore() string { + cfg.rwLock.RLock() + defer cfg.rwLock.RUnlock() + + return cfg.credentialsStore +} + +// Path returns the path to the config file. +func (cfg *Config) Path() string { + return cfg.path +} + +// SetCredentialsStore sets the configured credentials store. +func (cfg *Config) SetCredentialsStore(credsStore string) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + cfg.credentialsStore = credsStore + return cfg.saveFile() +} + +// IsAuthConfigured returns whether there is authentication configured in this +// config file or not. +func (cfg *Config) IsAuthConfigured() bool { + return cfg.credentialsStore != "" || + len(cfg.credentialHelpers) > 0 || + len(cfg.authsCache) > 0 +} + +// saveFile saves Config into the file. +func (cfg *Config) saveFile() (returnErr error) { + // marshal content + // credentialHelpers is skipped as it's never set + if cfg.credentialsStore != "" { + credsStoreBytes, err := json.Marshal(cfg.credentialsStore) + if err != nil { + return fmt.Errorf("failed to marshal creds store: %w", err) + } + cfg.content[configFieldCredentialsStore] = credsStoreBytes + } else { + // omit empty + delete(cfg.content, configFieldCredentialsStore) + } + authsBytes, err := json.Marshal(cfg.authsCache) + if err != nil { + return fmt.Errorf("failed to marshal credentials: %w", err) + } + cfg.content[configFieldAuths] = authsBytes + jsonBytes, err := json.MarshalIndent(cfg.content, "", "\t") + if err != nil { + return fmt.Errorf("failed to marshal config: %w", err) + } + + // write the content to an ingest file for atomicity + configDir := filepath.Dir(cfg.path) + if err := os.MkdirAll(configDir, 0700); err != nil { + return fmt.Errorf("failed to make directory %s: %w", configDir, err) + } + ingest, err := ioutil.Ingest(configDir, bytes.NewReader(jsonBytes)) + if err != nil { + return fmt.Errorf("failed to save config file: %w", err) + } + defer func() { + if returnErr != nil { + // clean up the ingest file in case of error + os.Remove(ingest) + } + }() + + // overwrite the config file + if err := os.Rename(ingest, cfg.path); err != nil { + return fmt.Errorf("failed to save config file: %w", err) + } + return nil +} + +// encodeAuth base64-encodes username and password into base64(username:password). +func encodeAuth(username, password string) string { + if username == "" && password == "" { + return "" + } + return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) +} + +// decodeAuth decodes a base64-encoded string and returns username and password. +func decodeAuth(authStr string) (username string, password string, err error) { + if authStr == "" { + return "", "", nil + } + + decoded, err := base64.StdEncoding.DecodeString(authStr) + if err != nil { + return "", "", err + } + decodedStr := string(decoded) + username, password, ok := strings.Cut(decodedStr, ":") + if !ok { + return "", "", fmt.Errorf("auth '%s' does not conform the base64(username:password) format", decodedStr) + } + return username, password, nil +} + +// toHostname normalizes a server address to just its hostname, removing +// the scheme and the path parts. +// It is used to match keys in the auths map, which may be either stored as +// hostname or as hostname including scheme (in legacy docker config files).
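+// For example, "https://registry.example.com/v2/" is normalized to +// "registry.example.com".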
+// Reference: https://github.com/docker/cli/blob/v24.0.6/cli/config/credentials/file_store.go#L71 +func toHostname(addr string) string { + addr = strings.TrimPrefix(addr, "http://") + addr = strings.TrimPrefix(addr, "https://") + addr, _, _ = strings.Cut(addr, "/") + return addr +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go new file mode 100644 index 000000000000..a074c68462e3 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go @@ -0,0 +1,80 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package executer is an abstraction for the docker credential helper protocol +// binaries. It is used by nativeStore to interact with installed binaries. +package executer + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "os/exec" + + "oras.land/oras-go/v2/registry/remote/credentials/trace" +) + +// dockerDesktopHelperName is the name of the docker credentials helper +// executable. +const dockerDesktopHelperName = "docker-credential-desktop.exe" + +// Executer is an interface that simulates an executable binary. +type Executer interface { + Execute(ctx context.Context, input io.Reader, action string) ([]byte, error) +} + +// executable implements the Executer interface. +type executable struct { + name string +} + +// New returns a new Executer instance. +func New(name string) Executer { + return &executable{ + name: name, + } +} + +// Execute operates on an executable binary and supports context. +func (c *executable) Execute(ctx context.Context, input io.Reader, action string) ([]byte, error) { + cmd := exec.CommandContext(ctx, c.name, action) + cmd.Stdin = input + cmd.Stderr = os.Stderr + trace := trace.ContextExecutableTrace(ctx) + if trace != nil && trace.ExecuteStart != nil { + trace.ExecuteStart(c.name, action) + } + output, err := cmd.Output() + if trace != nil && trace.ExecuteDone != nil { + trace.ExecuteDone(c.name, action, err) + } + if err != nil { + switch execErr := err.(type) { + case *exec.ExitError: + if errMessage := string(bytes.TrimSpace(output)); errMessage != "" { + return nil, errors.New(errMessage) + } + case *exec.Error: + // check if the error is caused by Docker Desktop not running + if execErr.Err == exec.ErrNotFound && c.name == dockerDesktopHelperName { + return nil, errors.New("credentials store is configured to `desktop.exe` but Docker Desktop seems not running") + } + } + return nil, err + } + return output, nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go new file mode 100644 index 000000000000..b2e3179ddbb7 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go @@ -0,0 +1,49 @@ +/* +Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutil + +import ( + "fmt" + "io" + "os" +) + +// Ingest writes content into a temporary ingest file with the file name format +// "oras_credstore_temp_{randomString}". +func Ingest(dir string, content io.Reader) (path string, ingestErr error) { + tempFile, err := os.CreateTemp(dir, "oras_credstore_temp_*") + if err != nil { + return "", fmt.Errorf("failed to create ingest file: %w", err) + } + path = tempFile.Name() + defer func() { + if err := tempFile.Close(); err != nil && ingestErr == nil { + ingestErr = fmt.Errorf("failed to close ingest file: %w", err) + } + // remove the temp file in case of error. + if ingestErr != nil { + os.Remove(path) + } + }() + + if err := tempFile.Chmod(0600); err != nil { + return "", fmt.Errorf("failed to ensure permission: %w", err) + } + if _, err := io.Copy(tempFile, content); err != nil { + return "", fmt.Errorf("failed to ingest: %w", err) + } + return +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go new file mode 100644 index 000000000000..6eb7749b4f65 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go @@ -0,0 +1,54 @@ +/* + Copyright The ORAS Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package credentials + +import ( + "context" + "sync" + + "oras.land/oras-go/v2/registry/remote/auth" +) + +// memoryStore is a store that keeps credentials in memory. +type memoryStore struct { + store sync.Map +} + +// NewMemoryStore creates a new in-memory credentials store. +func NewMemoryStore() Store { + return &memoryStore{} +} + +// Get retrieves credentials from the store for the given server address. +func (ms *memoryStore) Get(_ context.Context, serverAddress string) (auth.Credential, error) { + cred, found := ms.store.Load(serverAddress) + if !found { + return auth.EmptyCredential, nil + } + return cred.(auth.Credential), nil +} + +// Put saves credentials into the store for the given server address. +func (ms *memoryStore) Put(_ context.Context, serverAddress string, cred auth.Credential) error { + ms.store.Store(serverAddress, cred) + return nil +} + +// Delete removes credentials from the store for the given server address. 
+func (ms *memoryStore) Delete(_ context.Context, serverAddress string) error { + ms.store.Delete(serverAddress) + return nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go new file mode 100644 index 000000000000..9f4c7f742da3 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go @@ -0,0 +1,139 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "bytes" + "context" + "encoding/json" + "os/exec" + "strings" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/executer" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + emptyUsername = "" + errCredentialsNotFoundMessage = "credentials not found in native keychain" +) + +// dockerCredentials mimics how docker credential helper binaries store +// credential information. +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol +type dockerCredentials struct { + ServerURL string `json:"ServerURL"` + Username string `json:"Username"` + Secret string `json:"Secret"` +} + +// nativeStore implements a credentials store using native keychain to keep +// credentials secure. +type nativeStore struct { + exec executer.Executer +} + +// NewNativeStore creates a new native store that uses a remote helper program to +// manage credentials. +// +// The argument of NewNativeStore can be the native keychains +// ("wincred" for Windows, "pass" for Linux, and "osxkeychain" for macOS), +// or any program that follows the docker-credentials-helper protocol. +// +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login#credentials-store +func NewNativeStore(helperSuffix string) Store { + return &nativeStore{ + exec: executer.New(remoteCredentialsPrefix + helperSuffix), + } +} + +// NewDefaultNativeStore returns a native store based on the platform-default +// docker credentials helper and a bool indicating if the native store is +// available. +// - Windows: "wincred" +// - Linux: "pass" or "secretservice" +// - macOS: "osxkeychain" +// +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login/#credentials-store +func NewDefaultNativeStore() (Store, bool) { + if helper := getDefaultHelperSuffix(); helper != "" { + return NewNativeStore(helper), true + } + return nil, false +} + +// Get retrieves credentials from the store for the given server. +func (ns *nativeStore) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + var cred auth.Credential + out, err := ns.exec.Execute(ctx, strings.NewReader(serverAddress), "get") + if err != nil { + if err.Error() == errCredentialsNotFoundMessage { + // do not return an error if the credentials are not in the keychain.
+ return auth.EmptyCredential, nil + } + return auth.EmptyCredential, err + } + var dockerCred dockerCredentials + if err := json.Unmarshal(out, &dockerCred); err != nil { + return auth.EmptyCredential, err + } + // bearer auth is used if the username is "" + if dockerCred.Username == emptyUsername { + cred.RefreshToken = dockerCred.Secret + } else { + cred.Username = dockerCred.Username + cred.Password = dockerCred.Secret + } + return cred, nil +} + +// Put saves credentials into the store. +func (ns *nativeStore) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + dockerCred := &dockerCredentials{ + ServerURL: serverAddress, + Username: cred.Username, + Secret: cred.Password, + } + if cred.RefreshToken != "" { + dockerCred.Username = emptyUsername + dockerCred.Secret = cred.RefreshToken + } + credJSON, err := json.Marshal(dockerCred) + if err != nil { + return err + } + _, err = ns.exec.Execute(ctx, bytes.NewReader(credJSON), "store") + return err +} + +// Delete removes credentials from the store for the given server. +func (ns *nativeStore) Delete(ctx context.Context, serverAddress string) error { + _, err := ns.exec.Execute(ctx, strings.NewReader(serverAddress), "erase") + return err +} + +// getDefaultHelperSuffix returns the default credential helper suffix. +func getDefaultHelperSuffix() string { + platformDefault := getPlatformDefaultHelperSuffix() + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go new file mode 100644 index 000000000000..1a9aca6fc708 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go @@ -0,0 +1,23 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "osxkeychain" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go new file mode 100644 index 000000000000..5c7d4a3b27ad --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go @@ -0,0 +1,25 @@ +//go:build !windows && !darwin && !linux + +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go new file mode 100644 index 000000000000..f182923b7a15 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go @@ -0,0 +1,29 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import "os/exec" + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go new file mode 100644 index 000000000000..e334cc79b158 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go @@ -0,0 +1,23 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "wincred" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go new file mode 100644 index 000000000000..39735b77cb28 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go @@ -0,0 +1,102 @@ +/* +Copyright The ORAS Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "context" + "errors" + "fmt" + + "oras.land/oras-go/v2/registry/remote" + "oras.land/oras-go/v2/registry/remote/auth" +) + +// ErrClientTypeUnsupported is returned by Login() when the registry's client type +// is not supported. +var ErrClientTypeUnsupported = errors.New("client type not supported") + +// Login provides the login functionality with the given credentials. The target +// registry's client should be nil or of type *auth.Client. Login uses +// a client local to the function and will not modify the original client of +// the registry. +func Login(ctx context.Context, store Store, reg *remote.Registry, cred auth.Credential) error { + // create a clone of the original registry for login purposes + regClone := *reg + // we use the original client if applicable, otherwise use a default client + var authClient auth.Client + if reg.Client == nil { + authClient = *auth.DefaultClient + authClient.Cache = nil // no cache + } else if client, ok := reg.Client.(*auth.Client); ok { + authClient = *client + } else { + return ErrClientTypeUnsupported + } + regClone.Client = &authClient + // update credentials with the client + authClient.Credential = auth.StaticCredential(reg.Reference.Registry, cred) + // validate and store the credential + if err := regClone.Ping(ctx); err != nil { + return fmt.Errorf("failed to validate the credentials for %s: %w", regClone.Reference.Registry, err) + } + hostname := ServerAddressFromRegistry(regClone.Reference.Registry) + if err := store.Put(ctx, hostname, cred); err != nil { + return fmt.Errorf("failed to store the credentials for %s: %w", hostname, err) + } + return nil +} + +// Logout provides the logout functionality given the registry name. +func Logout(ctx context.Context, store Store, registryName string) error { + registryName = ServerAddressFromRegistry(registryName) + if err := store.Delete(ctx, registryName); err != nil { + return fmt.Errorf("failed to delete the credential for %s: %w", registryName, err) + } + return nil +} + +// Credential returns a Credential() function that can be used by auth.Client. +func Credential(store Store) auth.CredentialFunc { + return func(ctx context.Context, hostport string) (auth.Credential, error) { + hostport = ServerAddressFromHostname(hostport) + if hostport == "" { + return auth.EmptyCredential, nil + } + return store.Get(ctx, hostport) + } +} + +// ServerAddressFromRegistry maps a registry to a server address, which is used as +// a key for credentials store. The Docker CLI expects that the credentials of +// the registry 'docker.io' will be added under the key "https://index.docker.io/v1/". +// See: https://github.com/moby/moby/blob/v24.0.2/registry/config.go#L25-L48 +func ServerAddressFromRegistry(registry string) string { + if registry == "docker.io" { + return "https://index.docker.io/v1/" + } + return registry +} + +// ServerAddressFromHostname maps a hostname to a server address, which is used as +// a key for credentials store.
It is expected that the traffic targeting the +// host "registry-1.docker.io" will be redirected to "https://index.docker.io/v1/". +// See: https://github.com/moby/moby/blob/v24.0.2/registry/config.go#L25-L48 +func ServerAddressFromHostname(hostname string) string { + if hostname == "registry-1.docker.io" { + return "https://index.docker.io/v1/" + } + return hostname +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go new file mode 100644 index 000000000000..e26a98ae7ae5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go @@ -0,0 +1,262 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package credentials supports reading, saving, and removing credentials from +// Docker configuration files and external credential stores that follow +// the Docker credential helper protocol. +// +// Reference: https://docs.docker.com/engine/reference/commandline/login/#credential-stores +package credentials + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "oras.land/oras-go/v2/internal/syncutil" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/config" +) + +const ( + dockerConfigDirEnv = "DOCKER_CONFIG" + dockerConfigFileDir = ".docker" + dockerConfigFileName = "config.json" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Get retrieves credentials from the store for the given server address. + Get(ctx context.Context, serverAddress string) (auth.Credential, error) + // Put saves credentials into the store for the given server address. + Put(ctx context.Context, serverAddress string, cred auth.Credential) error + // Delete removes credentials from the store for the given server address. + Delete(ctx context.Context, serverAddress string) error +} + +// DynamicStore dynamically determines which store to use based on the settings +// in the config file. +type DynamicStore struct { + config *config.Config + options StoreOptions + detectedCredsStore string + setCredsStoreOnce syncutil.OnceOrRetry +} + +// StoreOptions provides options for NewStore. +type StoreOptions struct { + // AllowPlaintextPut allows saving credentials in plaintext in the config + // file. + // - If AllowPlaintextPut is set to false (default value), Put() will + // return an error when native store is not available. + // - If AllowPlaintextPut is set to true, Put() will save credentials in + // plaintext in the config file when native store is not available. + AllowPlaintextPut bool + + // DetectDefaultNativeStore enables detecting the platform-default native + // credentials store when the config file has no authentication information. + // + // If DetectDefaultNativeStore is set to true, the store will detect and set + // the default native credentials store in the "credsStore" field of the + // config file.
+ // - Windows: "wincred" + // - Linux: "pass" or "secretservice" + // - macOS: "osxkeychain" + // + // References: + // - https://docs.docker.com/engine/reference/commandline/login/#credentials-store + // - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties + DetectDefaultNativeStore bool +} + +// NewStore returns a Store based on the given configuration file. +// +// For Get(), Put() and Delete(), the returned Store will dynamically determine +// which underlying credentials store to use for the given server address. +// The underlying credentials store is determined in the following order: +// 1. Native server-specific credential helper +// 2. Native credentials store +// 3. The plain-text config file itself +// +// References: +// - https://docs.docker.com/engine/reference/commandline/login/#credentials-store +// - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +func NewStore(configPath string, opts StoreOptions) (*DynamicStore, error) { + cfg, err := config.Load(configPath) + if err != nil { + return nil, err + } + ds := &DynamicStore{ + config: cfg, + options: opts, + } + if opts.DetectDefaultNativeStore && !cfg.IsAuthConfigured() { + // no authentication configured, detect the default credentials store + ds.detectedCredsStore = getDefaultHelperSuffix() + } + return ds, nil +} + +// NewStoreFromDocker returns a Store based on the default docker config file. +// - If the $DOCKER_CONFIG environment variable is set, +// $DOCKER_CONFIG/config.json will be used. +// - Otherwise, the default location $HOME/.docker/config.json will be used. +// +// NewStoreFromDocker internally calls [NewStore]. +// +// References: +// - https://docs.docker.com/engine/reference/commandline/cli/#configuration-files +// - https://docs.docker.com/engine/reference/commandline/cli/#change-the-docker-directory +func NewStoreFromDocker(opt StoreOptions) (*DynamicStore, error) { + configPath, err := getDockerConfigPath() + if err != nil { + return nil, err + } + return NewStore(configPath, opt) +} + +// Get retrieves credentials from the store for the given server address. +func (ds *DynamicStore) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + return ds.getStore(serverAddress).Get(ctx, serverAddress) +} + +// Put saves credentials into the store for the given server address. +// Put returns ErrPlaintextPutDisabled if native store is not available and +// [StoreOptions].AllowPlaintextPut is set to false. +func (ds *DynamicStore) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + if err := ds.getStore(serverAddress).Put(ctx, serverAddress, cred); err != nil { + return err + } + // save the detected creds store back to the config file on first put + return ds.setCredsStoreOnce.Do(func() error { + if ds.detectedCredsStore != "" { + if err := ds.config.SetCredentialsStore(ds.detectedCredsStore); err != nil { + return fmt.Errorf("failed to set credsStore: %w", err) + } + } + return nil + }) +} + +// Delete removes credentials from the store for the given server address. +func (ds *DynamicStore) Delete(ctx context.Context, serverAddress string) error { + return ds.getStore(serverAddress).Delete(ctx, serverAddress) +} + +// IsAuthConfigured returns whether there is authentication configured in the +// config file or not. 
+// +// IsAuthConfigured returns true when: +// - The "credsStore" field is not empty +// - Or the "credHelpers" field is not empty +// - Or there is any entry in the "auths" field +func (ds *DynamicStore) IsAuthConfigured() bool { + return ds.config.IsAuthConfigured() +} + +// ConfigPath returns the path to the config file. +func (ds *DynamicStore) ConfigPath() string { + return ds.config.Path() +} + +// getHelperSuffix returns the credential helper suffix for the given server +// address. +func (ds *DynamicStore) getHelperSuffix(serverAddress string) string { + // 1. Look for a server-specific credential helper first + if helper := ds.config.GetCredentialHelper(serverAddress); helper != "" { + return helper + } + // 2. Then look for the configured native store + if credsStore := ds.config.CredentialsStore(); credsStore != "" { + return credsStore + } + // 3. Use the detected default store + return ds.detectedCredsStore +} + +// getStore returns a store for the given server address. +func (ds *DynamicStore) getStore(serverAddress string) Store { + if helper := ds.getHelperSuffix(serverAddress); helper != "" { + return NewNativeStore(helper) + } + + fs := newFileStore(ds.config) + fs.DisablePut = !ds.options.AllowPlaintextPut + return fs +} + +// getDockerConfigPath returns the path to the default docker config file. +func getDockerConfigPath() (string, error) { + // first try the environment variable + configDir := os.Getenv(dockerConfigDirEnv) + if configDir == "" { + // then try home directory + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("failed to get user home directory: %w", err) + } + configDir = filepath.Join(homeDir, dockerConfigFileDir) + } + return filepath.Join(configDir, dockerConfigFileName), nil +} + +// storeWithFallbacks is a store that has multiple fallback stores. +type storeWithFallbacks struct { + stores []Store +} + +// NewStoreWithFallbacks returns a new store based on the given stores. +// - Get() searches the primary and the fallback stores +// for the credentials and returns when it finds the +// credentials in any of the stores. +// - Put() saves the credentials into the primary store. +// - Delete() deletes the credentials from the primary store. +func NewStoreWithFallbacks(primary Store, fallbacks ...Store) Store { + if len(fallbacks) == 0 { + return primary + } + return &storeWithFallbacks{ + stores: append([]Store{primary}, fallbacks...), + } +} + +// Get retrieves credentials from the StoreWithFallbacks for the given server. +// It searches the primary and the fallback stores for the credentials of serverAddress +// and returns when it finds the credentials in any of the stores. +func (sf *storeWithFallbacks) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + for _, s := range sf.stores { + cred, err := s.Get(ctx, serverAddress) + if err != nil { + return auth.EmptyCredential, err + } + if cred != auth.EmptyCredential { + return cred, nil + } + } + return auth.EmptyCredential, nil +} + +// Put saves credentials into the StoreWithFallbacks. It puts +// the credentials into the primary store. +func (sf *storeWithFallbacks) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + return sf.stores[0].Put(ctx, serverAddress, cred) +} + +// Delete removes credentials from the StoreWithFallbacks for the given server. +// It deletes the credentials from the primary store. 
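+// For example, with NewStoreWithFallbacks(primary, fallback), Delete removes the +// credentials from primary only; a copy held by fallback remains readable via Get.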
+func (sf *storeWithFallbacks) Delete(ctx context.Context, serverAddress string) error { + return sf.stores[0].Delete(ctx, serverAddress) +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go new file mode 100644 index 000000000000..b7cd8683c464 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go @@ -0,0 +1,94 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import "context" + +// executableTraceContextKey is a value key used to retrieve the ExecutableTrace +// from Context. +type executableTraceContextKey struct{} + +// ExecutableTrace is a set of hooks used to trace the execution of binary +// executables. Any particular hook may be nil. +type ExecutableTrace struct { + // ExecuteStart is called before the execution of the executable. The + // executableName parameter is the name of the credential helper executable + // used with NativeStore. The action parameter is one of "store", "get" and + // "erase". + // + // Reference: + // - https://docs.docker.com/engine/reference/commandline/login#credentials-store + ExecuteStart func(executableName string, action string) + + // ExecuteDone is called after the execution of an executable completes. + // The executableName parameter is the name of the credential helper + // executable used with NativeStore. The action parameter is one of "store", + // "get" and "erase". The err parameter is the error (if any) returned from + // the execution. + // + // Reference: + // - https://docs.docker.com/engine/reference/commandline/login#credentials-store + ExecuteDone func(executableName string, action string, err error) +} + +// ContextExecutableTrace returns the ExecutableTrace associated with the +// context. If none, it returns nil. +func ContextExecutableTrace(ctx context.Context) *ExecutableTrace { + trace, _ := ctx.Value(executableTraceContextKey{}).(*ExecutableTrace) + return trace +} + +// WithExecutableTrace takes a Context and an ExecutableTrace, and returns a +// Context with the ExecutableTrace added as a Value. If the Context has a +// previously added trace, the hooks defined in the new trace will be added +// in addition to the previous ones. The recent hooks will be called first. +func WithExecutableTrace(ctx context.Context, trace *ExecutableTrace) context.Context { + if trace == nil { + return ctx + } + if oldTrace := ContextExecutableTrace(ctx); oldTrace != nil { + trace.compose(oldTrace) + } + return context.WithValue(ctx, executableTraceContextKey{}, trace) +} + +// compose takes an oldTrace and modifies the existing trace to include +// the hooks defined in the oldTrace. The hooks in the existing trace will +// be called first. 
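+// For example (illustrative), if t2 is added via WithExecutableTrace to a context +// already carrying t1, the composed t2.ExecuteStart invokes t2's hook before t1's.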
+func (trace *ExecutableTrace) compose(oldTrace *ExecutableTrace) { + if oldStart := oldTrace.ExecuteStart; oldStart != nil { + start := trace.ExecuteStart + if start != nil { + trace.ExecuteStart = func(executableName, action string) { + start(executableName, action) + oldStart(executableName, action) + } + } else { + trace.ExecuteStart = oldStart + } + } + if oldDone := oldTrace.ExecuteDone; oldDone != nil { + done := trace.ExecuteDone + if done != nil { + trace.ExecuteDone = func(executableName, action string, err error) { + done(executableName, action, err) + oldDone(executableName, action, err) + } + } else { + trace.ExecuteDone = oldDone + } + } +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go b/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go new file mode 100644 index 000000000000..9f87d86d1728 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go @@ -0,0 +1,128 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errcode + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "unicode" +) + +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#error-codes +// - https://docs.docker.com/registry/spec/api/#errors-2 +const ( + ErrorCodeBlobUnknown = "BLOB_UNKNOWN" + ErrorCodeBlobUploadInvalid = "BLOB_UPLOAD_INVALID" + ErrorCodeBlobUploadUnknown = "BLOB_UPLOAD_UNKNOWN" + ErrorCodeDigestInvalid = "DIGEST_INVALID" + ErrorCodeManifestBlobUnknown = "MANIFEST_BLOB_UNKNOWN" + ErrorCodeManifestInvalid = "MANIFEST_INVALID" + ErrorCodeManifestUnknown = "MANIFEST_UNKNOWN" + ErrorCodeNameInvalid = "NAME_INVALID" + ErrorCodeNameUnknown = "NAME_UNKNOWN" + ErrorCodeSizeInvalid = "SIZE_INVALID" + ErrorCodeUnauthorized = "UNAUTHORIZED" + ErrorCodeDenied = "DENIED" + ErrorCodeUnsupported = "UNSUPPORTED" +) + +// Error represents a response inner error returned by the remote +// registry. +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#error-codes +// - https://docs.docker.com/registry/spec/api/#errors-2 +type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Detail any `json:"detail,omitempty"` +} + +// Error returns an error string describing the error. +func (e Error) Error() string { + code := strings.Map(func(r rune) rune { + if r == '_' { + return ' ' + } + return unicode.ToLower(r) + }, e.Code) + if e.Message == "" { + return code + } + if e.Detail == nil { + return fmt.Sprintf("%s: %s", code, e.Message) + } + return fmt.Sprintf("%s: %s: %v", code, e.Message, e.Detail) +} + +// Errors represents a list of response inner errors returned by the remote +// server. +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#error-codes +// - https://docs.docker.com/registry/spec/api/#errors-2 +type Errors []Error + +// Error returns an error string describing the error.
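A consuming-side sketch (not part of the diff) of matching these typed errors with errors.As; the Unwrap methods below are what let a single inner Error surface from an ErrorResponse. The helper name isNameUnknown is illustrative.

package main

import (
	"errors"

	"oras.land/oras-go/v2/registry/remote/errcode"
)

// isNameUnknown reports whether err carries the registry's NAME_UNKNOWN code.
// This mirrors the matching that IsErrorCode (later in this diff) performs.
func isNameUnknown(err error) bool {
	var ec errcode.Error
	// errors.As walks ErrorResponse.Unwrap() -> Errors.Unwrap() -> Error.
	return errors.As(err, &ec) && ec.Code == errcode.ErrorCodeNameUnknown
}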
+func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + } + var errmsgs []string + for _, err := range errs { + errmsgs = append(errmsgs, err.Error()) + } + return strings.Join(errmsgs, "; ") +} + +// Unwrap returns the inner error only when there is exactly one error. +func (errs Errors) Unwrap() error { + if len(errs) == 1 { + return errs[0] + } + return nil +} + +// ErrorResponse represents an error response. +type ErrorResponse struct { + Method string + URL *url.URL + StatusCode int + Errors Errors +} + +// Error returns an error string describing the error. +func (err *ErrorResponse) Error() string { + var errmsg string + if len(err.Errors) > 0 { + errmsg = err.Errors.Error() + } else { + errmsg = http.StatusText(err.StatusCode) + } + return fmt.Sprintf("%s %q: response status code %d: %s", err.Method, err.URL, err.StatusCode, errmsg) +} + +// Unwrap returns the internal errors of err if any. +func (err *ErrorResponse) Unwrap() error { + if len(err.Errors) == 0 { + return nil + } + return err.Errors +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go b/vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go new file mode 100644 index 000000000000..52dc3612aad3 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go @@ -0,0 +1,54 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errutil + +import ( + "encoding/json" + "errors" + "io" + "net/http" + + "oras.land/oras-go/v2/registry/remote/errcode" +) + +// maxErrorBytes specifies the default limit on how many response bytes are +// allowed in the server's error response. +// A typical error message is around 200 bytes. Hence, 8 KiB should be +// sufficient. +const maxErrorBytes int64 = 8 * 1024 // 8 KiB + +// ParseErrorResponse parses the error returned by the remote registry. +func ParseErrorResponse(resp *http.Response) error { + resultErr := &errcode.ErrorResponse{ + Method: resp.Request.Method, + URL: resp.Request.URL, + StatusCode: resp.StatusCode, + } + var body struct { + Errors errcode.Errors `json:"errors"` + } + lr := io.LimitReader(resp.Body, maxErrorBytes) + if err := json.NewDecoder(lr).Decode(&body); err == nil { + resultErr.Errors = body.Errors + } + return resultErr +} + +// IsErrorCode returns true if err is an Error and its Code equals code. +func IsErrorCode(err error, code string) bool { + var ec errcode.Error + return errors.As(err, &ec) && ec.Code == code +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/manifest.go b/vendor/oras.land/oras-go/v2/registry/remote/manifest.go new file mode 100644 index 000000000000..0e10297cb152 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/manifest.go @@ -0,0 +1,59 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "strings" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/internal/docker" + "oras.land/oras-go/v2/internal/spec" +) + +// defaultManifestMediaTypes contains the default set of manifest media types. +var defaultManifestMediaTypes = []string{ + docker.MediaTypeManifest, + docker.MediaTypeManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, + spec.MediaTypeArtifactManifest, +} + +// defaultManifestAcceptHeader is the default value of the `Accept` header for +// resolving manifests from tags. +var defaultManifestAcceptHeader = strings.Join(defaultManifestMediaTypes, ", ") + +// isManifest determines if the given descriptor points to a manifest. +func isManifest(manifestMediaTypes []string, desc ocispec.Descriptor) bool { + if len(manifestMediaTypes) == 0 { + manifestMediaTypes = defaultManifestMediaTypes + } + for _, mediaType := range manifestMediaTypes { + if desc.MediaType == mediaType { + return true + } + } + return false +} + +// manifestAcceptHeader generates the value of the `Accept` header for resolving +// manifests from tags. +func manifestAcceptHeader(manifestMediaTypes []string) string { + if len(manifestMediaTypes) == 0 { + return defaultManifestAcceptHeader + } + return strings.Join(manifestMediaTypes, ", ") +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/referrers.go b/vendor/oras.land/oras-go/v2/registry/remote/referrers.go new file mode 100644 index 000000000000..7466808937d5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/referrers.go @@ -0,0 +1,221 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "errors" + "strings" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/internal/descriptor" +) + +// zeroDigest represents a digest that consists of zeros. zeroDigest is used +// for pinging Referrers API. +const zeroDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000" + +// referrersState represents the state of Referrers API. +type referrersState = int32 + +const ( + // referrersStateUnknown represents an unknown state of Referrers API. + referrersStateUnknown referrersState = iota + // referrersStateSupported represents that the repository is known to + // support Referrers API. + referrersStateSupported + // referrersStateUnsupported represents that the repository is known to + // not support Referrers API.
+ referrersStateUnsupported +) + +// referrerOperation represents an operation on a referrer. +type referrerOperation = int32 + +const ( + // referrerOperationAdd represents an addition operation on a referrer. + referrerOperationAdd referrerOperation = iota + // referrerOperationRemove represents a removal operation on a referrer. + referrerOperationRemove +) + +// referrerChange represents a change on a referrer. +type referrerChange struct { + referrer ocispec.Descriptor + operation referrerOperation +} + +var ( + // ErrReferrersCapabilityAlreadySet is returned by SetReferrersCapability() + // when the Referrers API capability has already been set. + ErrReferrersCapabilityAlreadySet = errors.New("referrers capability cannot be changed once set") + + // errNoReferrerUpdate is returned by applyReferrerChanges() when there + // is no referrer update. + errNoReferrerUpdate = errors.New("no referrer update") +) + +const ( + // opDeleteReferrersIndex represents the operation for deleting a + // referrers index. + opDeleteReferrersIndex = "DeleteReferrersIndex" +) + +// ReferrersError records an error, the failing operation, and the subject descriptor. +type ReferrersError struct { + // Op represents the failing operation. + Op string + // Subject is the descriptor of the referenced artifact. + Subject ocispec.Descriptor + // Err is the underlying referrers error. + Err error +} + +// Error returns the error message of the ReferrersError. +func (e *ReferrersError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the inner error of the ReferrersError. +func (e *ReferrersError) Unwrap() error { + return errors.Unwrap(e.Err) +} + +// IsReferrersIndexDelete reports whether e is an error related to referrers +// index deletion. +func (e *ReferrersError) IsReferrersIndexDelete() bool { + return e.Op == opDeleteReferrersIndex +} + +// buildReferrersTag builds the referrers tag for the given manifest descriptor. +// Format: <algorithm>-<encoded> +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#unavailable-referrers-api +func buildReferrersTag(desc ocispec.Descriptor) string { + alg := desc.Digest.Algorithm().String() + encoded := desc.Digest.Encoded() + return alg + "-" + encoded +} + +// isReferrersFilterApplied checks if requested is in the applied filter list. +func isReferrersFilterApplied(applied, requested string) bool { + if applied == "" || requested == "" { + return false + } + filters := strings.Split(applied, ",") + for _, f := range filters { + if f == requested { + return true + } + } + return false +} + +// filterReferrers filters a slice of referrers by artifactType in place. +// The returned slice contains matching referrers. +func filterReferrers(refs []ocispec.Descriptor, artifactType string) []ocispec.Descriptor { + if artifactType == "" { + return refs + } + var j int + for i, ref := range refs { + if ref.ArtifactType == artifactType { + if i != j { + refs[j] = ref + } + j++ + } + } + return refs[:j] +} + +// applyReferrerChanges applies referrerChanges on referrers and returns the +// updated referrers. +// Returns errNoReferrerUpdate if there are no referrer updates.
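A small worked sketch (not part of the diff) of the tag schema that buildReferrersTag above computes: the subject digest with its ':' replaced by '-'.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	d := digest.FromString("hello")
	// Same shape buildReferrersTag produces: <algorithm>-<encoded>.
	tag := d.Algorithm().String() + "-" + d.Encoded()
	fmt.Println(tag) // e.g. sha256-2cf24db5...
}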
+func applyReferrerChanges(referrers []ocispec.Descriptor, referrerChanges []referrerChange) ([]ocispec.Descriptor, error) { + referrersMap := make(map[descriptor.Descriptor]int, len(referrers)+len(referrerChanges)) + updatedReferrers := make([]ocispec.Descriptor, 0, len(referrers)+len(referrerChanges)) + var updateRequired bool + for _, r := range referrers { + if content.Equal(r, ocispec.Descriptor{}) { + // skip bad entry + updateRequired = true + continue + } + key := descriptor.FromOCI(r) + if _, ok := referrersMap[key]; ok { + // skip duplicates + updateRequired = true + continue + } + updatedReferrers = append(updatedReferrers, r) + referrersMap[key] = len(updatedReferrers) - 1 + } + + // apply changes + for _, change := range referrerChanges { + key := descriptor.FromOCI(change.referrer) + switch change.operation { + case referrerOperationAdd: + if _, ok := referrersMap[key]; !ok { + // add distinct referrers + updatedReferrers = append(updatedReferrers, change.referrer) + referrersMap[key] = len(updatedReferrers) - 1 + } + case referrerOperationRemove: + if pos, ok := referrersMap[key]; ok { + // remove referrers that are already in the map + updatedReferrers[pos] = ocispec.Descriptor{} + delete(referrersMap, key) + } + } + } + + // skip unnecessary update + if !updateRequired && len(referrersMap) == len(referrers) { + // if the result referrer map contains the same content as the + // original referrers, consider that there is no update on the + // referrers. + for _, r := range referrers { + key := descriptor.FromOCI(r) + if _, ok := referrersMap[key]; !ok { + updateRequired = true + } + } + if !updateRequired { + return nil, errNoReferrerUpdate + } + } + + return removeEmptyDescriptors(updatedReferrers, len(referrersMap)), nil +} + +// removeEmptyDescriptors in-place removes empty items from descs, given a hint +// of the number of non-empty descriptors. +func removeEmptyDescriptors(descs []ocispec.Descriptor, hint int) []ocispec.Descriptor { + j := 0 + for i, r := range descs { + if !content.Equal(r, ocispec.Descriptor{}) { + if i > j { + descs[j] = r + } + j++ + } + if j == hint { + break + } + } + return descs[:j] +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/registry.go b/vendor/oras.land/oras-go/v2/registry/remote/registry.go new file mode 100644 index 000000000000..1099b585d5e5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/registry.go @@ -0,0 +1,190 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remote provides a client to the remote registry. +// Reference: https://github.com/distribution/distribution +package remote + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/registry" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/internal/errutil" +) + +// RepositoryOptions is an alias of Repository to avoid name conflicts. +// It also hides all methods associated with Repository. 
+type RepositoryOptions Repository + +// Registry is an HTTP client to a remote registry. +type Registry struct { + // RepositoryOptions contains common options for Registry and Repository. + // It is also used as a template for derived repositories. + RepositoryOptions + + // RepositoryListPageSize specifies the page size when invoking the catalog + // API. + // If zero, the page size is determined by the remote registry. + // Reference: https://docs.docker.com/registry/spec/api/#catalog + RepositoryListPageSize int +} + +// NewRegistry creates a client to the remote registry with the specified domain +// name. +// Example: localhost:5000 +func NewRegistry(name string) (*Registry, error) { + ref := registry.Reference{ + Registry: name, + } + if err := ref.ValidateRegistry(); err != nil { + return nil, err + } + return &Registry{ + RepositoryOptions: RepositoryOptions{ + Reference: ref, + }, + }, nil +} + +// client returns an HTTP client used to access the remote registry. +// A default HTTP client is returned if the client is not configured. +func (r *Registry) client() Client { + if r.Client == nil { + return auth.DefaultClient + } + return r.Client +} + +// do sends an HTTP request and returns an HTTP response using the HTTP client +// returned by r.client(). +func (r *Registry) do(req *http.Request) (*http.Response, error) { + if r.HandleWarning == nil { + return r.client().Do(req) + } + + resp, err := r.client().Do(req) + if err != nil { + return nil, err + } + handleWarningHeaders(resp.Header.Values(headerWarning), r.HandleWarning) + return resp, nil +} + +// Ping checks whether or not the registry implements the Docker Registry API V2 +// or the OCI Distribution Specification. +// Ping can be used to check authentication when an auth client is configured. +// +// References: +// - https://docs.docker.com/registry/spec/api/#base +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#api +func (r *Registry) Ping(ctx context.Context) error { + url := buildRegistryBaseURL(r.PlainHTTP, r.Reference) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return err + } + + resp, err := r.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusNotFound: + return errdef.ErrNotFound + default: + return errutil.ParseErrorResponse(resp) + } +} + +// Repositories lists the names of the repositories available in the registry. +// See also `RepositoryListPageSize`. +// +// If `last` is NOT empty, the entries in the response start after the +// repo specified by `last`. Otherwise, the response starts from the top +// of the Repositories list. +// +// Reference: https://docs.docker.com/registry/spec/api/#catalog +func (r *Registry) Repositories(ctx context.Context, last string, fn func(repos []string) error) error { + ctx = auth.AppendScopesForHost(ctx, r.Reference.Host(), auth.ScopeRegistryCatalog) + url := buildRegistryCatalogURL(r.PlainHTTP, r.Reference) + var err error + for err == nil { + url, err = r.repositories(ctx, last, fn, url) + // clear `last` for subsequent pages + last = "" + } + if err != errNoLink { + return err + } + return nil +} + +// repositories returns a single page of repository list with the next link.
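A usage sketch (not part of the diff) for the catalog API above; the registry name, page size, and PlainHTTP setting are illustrative assumptions for a local, TLS-less registry.

package main

import (
	"context"
	"fmt"

	"oras.land/oras-go/v2/registry/remote"
)

func main() {
	ctx := context.Background()
	reg, err := remote.NewRegistry("localhost:5000")
	if err != nil {
		panic(err)
	}
	reg.PlainHTTP = true // assumption: local registry without TLS
	reg.RepositoryListPageSize = 50

	// Ping doubles as an authentication check when an auth client is set.
	if err := reg.Ping(ctx); err != nil {
		panic(err)
	}
	// fn is invoked once per page; pagination follows the Link header.
	err = reg.Repositories(ctx, "", func(repos []string) error {
		for _, name := range repos {
			fmt.Println(name)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}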
+func (r *Registry) repositories(ctx context.Context, last string, fn func(repos []string) error, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", err + } + if r.RepositoryListPageSize > 0 || last != "" { + q := req.URL.Query() + if r.RepositoryListPageSize > 0 { + q.Set("n", strconv.Itoa(r.RepositoryListPageSize)) + } + if last != "" { + q.Set("last", last) + } + req.URL.RawQuery = q.Encode() + } + resp, err := r.do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", errutil.ParseErrorResponse(resp) + } + var page struct { + Repositories []string `json:"repositories"` + } + lr := limitReader(resp.Body, r.MaxMetadataBytes) + if err := json.NewDecoder(lr).Decode(&page); err != nil { + return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) + } + if err := fn(page.Repositories); err != nil { + return "", err + } + + return parseLink(resp) +} + +// Repository returns a repository reference by the given name. +func (r *Registry) Repository(ctx context.Context, name string) (registry.Repository, error) { + ref := registry.Reference{ + Registry: r.Reference.Registry, + Repository: name, + } + return newRepositoryWithOptions(ref, &r.RepositoryOptions) +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/repository.go b/vendor/oras.land/oras-go/v2/registry/remote/repository.go new file mode 100644 index 000000000000..7c36dc1c11ef --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/repository.go @@ -0,0 +1,1667 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "mime" + "net/http" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/cas" + "oras.land/oras-go/v2/internal/httputil" + "oras.land/oras-go/v2/internal/ioutil" + "oras.land/oras-go/v2/internal/spec" + "oras.land/oras-go/v2/internal/syncutil" + "oras.land/oras-go/v2/registry" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/errcode" + "oras.land/oras-go/v2/registry/remote/internal/errutil" +) + +const ( + // headerDockerContentDigest is the "Docker-Content-Digest" header. + // If present on the response, it contains the canonical digest of the + // uploaded blob. + // + // References: + // - https://docs.docker.com/registry/spec/api/#digest-header + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pull + headerDockerContentDigest = "Docker-Content-Digest" + + // headerOCIFiltersApplied is the "OCI-Filters-Applied" header. 
+ // If present on the response, it contains a comma-separated list of the + // applied filters. + // + // Reference: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers + headerOCIFiltersApplied = "OCI-Filters-Applied" + + // headerOCISubject is the "OCI-Subject" header. + // If present on the response, it contains the digest of the subject, + // indicating that the Referrers API is supported by the registry. + headerOCISubject = "OCI-Subject" +) + +// filterTypeArtifactType is the "artifactType" filter applied on the list of +// referrers. +// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +const filterTypeArtifactType = "artifactType" + +// Client is an interface for an HTTP client. +type Client interface { + // Do sends an HTTP request and returns an HTTP response. + // + // Unlike http.RoundTripper, Client can attempt to interpret the response + // and handle higher-level protocol details such as redirects and + // authentication. + // + // Like http.RoundTripper, Client should not modify the request, and must + // always close the request body. + Do(*http.Request) (*http.Response, error) +} + +// Repository is an HTTP client to a remote repository. +type Repository struct { + // Client is the underlying HTTP client used to access the remote registry. + // If nil, auth.DefaultClient is used. + Client Client + + // Reference references the remote repository. + Reference registry.Reference + + // PlainHTTP signals the transport to access the remote repository via HTTP + // instead of HTTPS. + PlainHTTP bool + + // ManifestMediaTypes is used in the `Accept` header for resolving manifests + // from references. It is also used in identifying manifests and blobs from + // descriptors. If an empty list is present, default manifest media types + // are used. + ManifestMediaTypes []string + + // TagListPageSize specifies the page size when invoking the tag list API. + // If zero, the page size is determined by the remote registry. + // Reference: https://docs.docker.com/registry/spec/api/#tags + TagListPageSize int + + // ReferrerListPageSize specifies the page size when invoking the Referrers + // API. + // If zero, the page size is determined by the remote registry. + // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers + ReferrerListPageSize int + + // MaxMetadataBytes specifies a limit on how many response bytes are allowed + // in the server's response to the metadata APIs, such as catalog list, tag + // list, and referrers list. + // If less than or equal to zero, a default (currently 4MiB) is used. + MaxMetadataBytes int64 + + // SkipReferrersGC specifies whether to delete the dangling referrers + // index when referrers tag schema is utilized. + // - If false, the old referrers index will be deleted after the new one + // is successfully uploaded. + // - If true, the old referrers index is kept. + // By default, it is disabled (set to false).
See also: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#referrers-tag-schema + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pushing-manifests-with-subject + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#deleting-manifests + SkipReferrersGC bool + + // HandleWarning handles the warning returned by the remote server. + // Callers SHOULD deduplicate warnings from multiple associated responses. + // + // References: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#warnings + // - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + HandleWarning func(warning Warning) + + // NOTE: Must keep fields in sync with clone(). + + // referrersState indicates whether the repository supports the Referrers API. + // default: referrersStateUnknown + referrersState referrersState + + // referrersPingLock locks the pingReferrers() method and allows only + // one goroutine to send the request. + referrersPingLock sync.Mutex + + // referrersMergePool provides a way to manage concurrent updates to a + // referrers index tagged by referrers tag schema. + referrersMergePool syncutil.Pool[syncutil.Merge[referrerChange]] +} + +// NewRepository creates a client to the remote repository identified by a +// reference. +// Example: localhost:5000/hello-world +func NewRepository(reference string) (*Repository, error) { + ref, err := registry.ParseReference(reference) + if err != nil { + return nil, err + } + return &Repository{ + Reference: ref, + }, nil +} + +// newRepositoryWithOptions returns a Repository with the given Reference and +// RepositoryOptions. +// +// RepositoryOptions are part of the Registry struct and set its defaults. +// RepositoryOptions shares the same struct definition as Repository, which +// contains unexported state that must not be copied to multiple Repositories. +// To handle this we explicitly copy only the fields that we want to reproduce. +func newRepositoryWithOptions(ref registry.Reference, opts *RepositoryOptions) (*Repository, error) { + if err := ref.ValidateRepository(); err != nil { + return nil, err + } + repo := (*Repository)(opts).clone() + repo.Reference = ref + return repo, nil +} + +// clone makes a copy of the Repository, being careful not to copy non-copyable fields (sync.Mutex and syncutil.Pool types). +func (r *Repository) clone() *Repository { + return &Repository{ + Client: r.Client, + Reference: r.Reference, + PlainHTTP: r.PlainHTTP, + ManifestMediaTypes: slices.Clone(r.ManifestMediaTypes), + TagListPageSize: r.TagListPageSize, + ReferrerListPageSize: r.ReferrerListPageSize, + MaxMetadataBytes: r.MaxMetadataBytes, + SkipReferrersGC: r.SkipReferrersGC, + HandleWarning: r.HandleWarning, + } +} + +// SetReferrersCapability indicates the Referrers API capability of the remote +// repository. true: capable; false: not capable. +// +// SetReferrersCapability is valid only when it is called for the first time. +// SetReferrersCapability returns ErrReferrersCapabilityAlreadySet if the +// Referrers API capability has already been set. +// - When the capability is set to true, the Referrers() function will always +// request the Referrers API. Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +// - When the capability is set to false, the Referrers() function will always +// request the Referrers Tag.
Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#referrers-tag-schema +// - When the capability is not set, the Referrers() function will automatically +// determine which API to use. +func (r *Repository) SetReferrersCapability(capable bool) error { + var state referrersState + if capable { + state = referrersStateSupported + } else { + state = referrersStateUnsupported + } + if swapped := atomic.CompareAndSwapInt32(&r.referrersState, referrersStateUnknown, state); !swapped { + if fact := r.loadReferrersState(); fact != state { + return fmt.Errorf("%w: current capability = %v, new capability = %v", + ErrReferrersCapabilityAlreadySet, + fact == referrersStateSupported, + capable) + } + } + return nil +} + +// loadReferrersState atomically loads r.referrersState. +func (r *Repository) loadReferrersState() referrersState { + return atomic.LoadInt32(&r.referrersState) +} + +// client returns an HTTP client used to access the remote repository. +// A default HTTP client is returned if the client is not configured. +func (r *Repository) client() Client { + if r.Client == nil { + return auth.DefaultClient + } + return r.Client +} + +// do sends an HTTP request and returns an HTTP response using the HTTP client +// returned by r.client(). +func (r *Repository) do(req *http.Request) (*http.Response, error) { + if r.HandleWarning == nil { + return r.client().Do(req) + } + + resp, err := r.client().Do(req) + if err != nil { + return nil, err + } + handleWarningHeaders(resp.Header.Values(headerWarning), r.HandleWarning) + return resp, nil +} + +// blobStore detects the blob store for the given descriptor. +func (r *Repository) blobStore(desc ocispec.Descriptor) registry.BlobStore { + if isManifest(r.ManifestMediaTypes, desc) { + return r.Manifests() + } + return r.Blobs() +} + +// Fetch fetches the content identified by the descriptor. +func (r *Repository) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) { + return r.blobStore(target).Fetch(ctx, target) +} + +// Push pushes the content, matching the expected descriptor. +func (r *Repository) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { + return r.blobStore(expected).Push(ctx, expected, content) +} + +// Mount makes the blob with the given digest in fromRepo +// available in the repository signified by the receiver. +// +// This avoids the need to pull content down from fromRepo only to push it to r. +// +// If the registry does not implement mounting, getContent will be used to get the +// content to push. If getContent is nil, the content will be pulled from the source +// repository. If getContent returns an error, it will be wrapped inside the error +// returned from Mount. +func (r *Repository) Mount(ctx context.Context, desc ocispec.Descriptor, fromRepo string, getContent func() (io.ReadCloser, error)) error { + return r.Blobs().(registry.Mounter).Mount(ctx, desc, fromRepo, getContent) +} + +// Exists returns true if the described content exists. +func (r *Repository) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) { + return r.blobStore(target).Exists(ctx, target) +} + +// Delete removes the content identified by the descriptor. +func (r *Repository) Delete(ctx context.Context, target ocispec.Descriptor) error { + return r.blobStore(target).Delete(ctx, target) +} + +// Blobs provides access to the blob CAS only, which contains config blobs, +// layers, and other generic blobs.
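A pull-side sketch (not part of the diff) using the dispatching methods above; the repository name, tag, and PlainHTTP setting are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"io"

	"oras.land/oras-go/v2/registry/remote"
)

func main() {
	ctx := context.Background()
	repo, err := remote.NewRepository("localhost:5000/hello-world")
	if err != nil {
		panic(err)
	}
	repo.PlainHTTP = true // assumption: local registry without TLS

	// Resolve the tag to a manifest descriptor; Fetch then dispatches to the
	// manifest store because the resolved media type is a manifest type.
	desc, err := repo.Resolve(ctx, "latest")
	if err != nil {
		panic(err)
	}
	rc, err := repo.Fetch(ctx, desc)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	manifest, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%d bytes)\n", desc.Digest, len(manifest))
}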
+func (r *Repository) Blobs() registry.BlobStore { + return &blobStore{repo: r} +} + +// Manifests provides access to the manifest CAS only. +func (r *Repository) Manifests() registry.ManifestStore { + return &manifestStore{repo: r} +} + +// Resolve resolves a reference to a manifest descriptor. +// See also `ManifestMediaTypes`. +func (r *Repository) Resolve(ctx context.Context, reference string) (ocispec.Descriptor, error) { + return r.Manifests().Resolve(ctx, reference) +} + +// Tag tags a manifest descriptor with a reference string. +func (r *Repository) Tag(ctx context.Context, desc ocispec.Descriptor, reference string) error { + return r.Manifests().Tag(ctx, desc, reference) +} + +// PushReference pushes the manifest with a reference tag. +func (r *Repository) PushReference(ctx context.Context, expected ocispec.Descriptor, content io.Reader, reference string) error { + return r.Manifests().PushReference(ctx, expected, content, reference) +} + +// FetchReference fetches the manifest identified by the reference. +// The reference can be a tag or digest. +func (r *Repository) FetchReference(ctx context.Context, reference string) (ocispec.Descriptor, io.ReadCloser, error) { + return r.Manifests().FetchReference(ctx, reference) +} + +// ParseReference resolves a tag or a digest reference to a fully qualified +// reference from a base reference r.Reference. +// Tag, digest, or fully qualified references are accepted as input. +// +// If reference is a fully qualified reference, then ParseReference parses it +// and returns the parsed reference. If the parsed reference does not share +// the same base reference with the Repository r, ParseReference returns a +// wrapped error ErrInvalidReference. +func (r *Repository) ParseReference(reference string) (registry.Reference, error) { + ref, err := registry.ParseReference(reference) + if err != nil { + ref = registry.Reference{ + Registry: r.Reference.Registry, + Repository: r.Reference.Repository, + Reference: reference, + } + + // reference is not a FQDN + if index := strings.IndexByte(reference, '@'); index != -1 { + // `@` implies *digest*, so drop the *tag* (irrespective of what it is). + ref.Reference = reference[index+1:] + err = ref.ValidateReferenceAsDigest() + } else { + err = ref.ValidateReference() + } + + if err != nil { + return registry.Reference{}, err + } + } else if ref.Registry != r.Reference.Registry || ref.Repository != r.Reference.Repository { + return registry.Reference{}, fmt.Errorf( + "%w: mismatch between received %q and expected %q", + errdef.ErrInvalidReference, ref, r.Reference, + ) + } + + if len(ref.Reference) == 0 { + return registry.Reference{}, errdef.ErrInvalidReference + } + + return ref, nil +} + +// Tags lists the tags available in the repository. +// See also `TagListPageSize`. +// If `last` is NOT empty, the entries in the response start after the +// tag specified by `last`. Otherwise, the response starts from the top +// of the Tags list. 
+// +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#content-discovery +// - https://docs.docker.com/registry/spec/api/#tags +func (r *Repository) Tags(ctx context.Context, last string, fn func(tags []string) error) error { + ctx = auth.AppendRepositoryScope(ctx, r.Reference, auth.ActionPull) + url := buildRepositoryTagListURL(r.PlainHTTP, r.Reference) + var err error + for err == nil { + url, err = r.tags(ctx, last, fn, url) + // clear `last` for subsequent pages + last = "" + } + if err != errNoLink { + return err + } + return nil +} + +// tags returns a single page of tag list with the next link. +func (r *Repository) tags(ctx context.Context, last string, fn func(tags []string) error, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", err + } + if r.TagListPageSize > 0 || last != "" { + q := req.URL.Query() + if r.TagListPageSize > 0 { + q.Set("n", strconv.Itoa(r.TagListPageSize)) + } + if last != "" { + q.Set("last", last) + } + req.URL.RawQuery = q.Encode() + } + resp, err := r.do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", errutil.ParseErrorResponse(resp) + } + var page struct { + Tags []string `json:"tags"` + } + lr := limitReader(resp.Body, r.MaxMetadataBytes) + if err := json.NewDecoder(lr).Decode(&page); err != nil { + return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) + } + if err := fn(page.Tags); err != nil { + return "", err + } + + return parseLink(resp) +} + +// Predecessors returns the descriptors of image or artifact manifests directly +// referencing the given manifest descriptor. +// Predecessors internally leverages Referrers. +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +func (r *Repository) Predecessors(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var res []ocispec.Descriptor + if err := r.Referrers(ctx, desc, "", func(referrers []ocispec.Descriptor) error { + res = append(res, referrers...) + return nil + }); err != nil { + return nil, err + } + return res, nil +} + +// Referrers lists the descriptors of image or artifact manifests directly +// referencing the given manifest descriptor. +// +// fn is called for each page of the referrers result. +// If artifactType is not empty, only referrers of the same artifact type are +// fed to fn. +// +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +func (r *Repository) Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { + state := r.loadReferrersState() + if state == referrersStateUnsupported { + // The repository is known to not support Referrers API, fallback to + // referrers tag schema. + return r.referrersByTagSchema(ctx, desc, artifactType, fn) + } + + err := r.referrersByAPI(ctx, desc, artifactType, fn) + if state == referrersStateSupported { + // The repository is known to support Referrers API, no fallback. + return err + } + + // The referrers state is unknown. + if err != nil { + if errors.Is(err, errdef.ErrUnsupported) { + // Referrers API is not supported, fallback to referrers tag schema. 
+ r.SetReferrersCapability(false) + return r.referrersByTagSchema(ctx, desc, artifactType, fn) + } + return err + } + + r.SetReferrersCapability(true) + return nil +} + +// referrersByAPI lists the descriptors of manifests directly referencing +// the given manifest descriptor by requesting the Referrers API. +// fn is called for the referrers result. If artifactType is not empty, +// only referrers of the same artifact type are fed to fn. +func (r *Repository) referrersByAPI(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { + ref := r.Reference + ref.Reference = desc.Digest.String() + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + + url := buildReferrersURL(r.PlainHTTP, ref, artifactType) + var err error + for err == nil { + url, err = r.referrersPageByAPI(ctx, artifactType, fn, url) + } + if err == errNoLink { + return nil + } + return err +} + +// referrersPageByAPI lists a single page of the descriptors of manifests +// directly referencing the given manifest descriptor. fn is called for +// each page of the referrers result. +// If artifactType is not empty, only referrers of the same +// artifact type are fed to fn. +// referrersPageByAPI returns the link URL for the next page. +func (r *Repository) referrersPageByAPI(ctx context.Context, artifactType string, fn func(referrers []ocispec.Descriptor) error, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", err + } + if r.ReferrerListPageSize > 0 { + q := req.URL.Query() + q.Set("n", strconv.Itoa(r.ReferrerListPageSize)) + req.URL.RawQuery = q.Encode() + } + + resp, err := r.do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + if errResp := errutil.ParseErrorResponse(resp); errutil.IsErrorCode(errResp, errcode.ErrorCodeNameUnknown) { + // The repository is not found, Referrers API status is unknown + return "", errResp + } + // Referrers API is not supported.
+ return "", fmt.Errorf("failed to query referrers API: %w", errdef.ErrUnsupported) + default: + return "", errutil.ParseErrorResponse(resp) + } + + // also check the content type + if ct := resp.Header.Get("Content-Type"); ct != ocispec.MediaTypeImageIndex { + return "", fmt.Errorf("unknown content returned (%s), expecting image index: %w", ct, errdef.ErrUnsupported) + } + + var index ocispec.Index + lr := limitReader(resp.Body, r.MaxMetadataBytes) + if err := json.NewDecoder(lr).Decode(&index); err != nil { + return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) + } + + referrers := index.Manifests + if artifactType != "" { + // check both filters header and filters annotations for compatibility + // latest spec for filters header: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers + // older spec for filters annotations: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers + filtersHeader := resp.Header.Get(headerOCIFiltersApplied) + filtersAnnotation := index.Annotations[spec.AnnotationReferrersFiltersApplied] + if !isReferrersFilterApplied(filtersHeader, filterTypeArtifactType) && + !isReferrersFilterApplied(filtersAnnotation, filterTypeArtifactType) { + // perform client side filtering if the filter is not applied on the server side + referrers = filterReferrers(referrers, artifactType) + } + } + if len(referrers) > 0 { + if err := fn(referrers); err != nil { + return "", err + } + } + return parseLink(resp) +} + +// referrersByTagSchema lists the descriptors of manifests directly +// referencing the given manifest descriptor by requesting referrers tag. +// fn is called for the referrers result. If artifactType is not empty, +// only referrers of the same artifact type are fed to fn. +// reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#backwards-compatibility +func (r *Repository) referrersByTagSchema(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { + referrersTag := buildReferrersTag(desc) + _, referrers, err := r.referrersFromIndex(ctx, referrersTag) + if err != nil { + if errors.Is(err, errdef.ErrNotFound) { + // no referrers to the manifest + return nil + } + return err + } + + filtered := filterReferrers(referrers, artifactType) + if len(filtered) == 0 { + return nil + } + return fn(filtered) +} + +// referrersFromIndex queries the referrers index using the the given referrers +// tag. If Succeeded, returns the descriptor of referrers index and the +// referrers list. +func (r *Repository) referrersFromIndex(ctx context.Context, referrersTag string) (ocispec.Descriptor, []ocispec.Descriptor, error) { + desc, rc, err := r.FetchReference(ctx, referrersTag) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + defer rc.Close() + + if err := limitSize(desc, r.MaxMetadataBytes); err != nil { + return ocispec.Descriptor{}, nil, fmt.Errorf("failed to read referrers index from referrers tag %s: %w", referrersTag, err) + } + var index ocispec.Index + if err := decodeJSON(rc, desc, &index); err != nil { + return ocispec.Descriptor{}, nil, fmt.Errorf("failed to decode referrers index from referrers tag %s: %w", referrersTag, err) + } + + return desc, index.Manifests, nil +} + +// pingReferrers returns true if the Referrers API is available for r. 
+func (r *Repository) pingReferrers(ctx context.Context) (bool, error) { + switch r.loadReferrersState() { + case referrersStateSupported: + return true, nil + case referrersStateUnsupported: + return false, nil + } + + // referrers state is unknown + // limit the rate of pinging referrers API + r.referrersPingLock.Lock() + defer r.referrersPingLock.Unlock() + + switch r.loadReferrersState() { + case referrersStateSupported: + return true, nil + case referrersStateUnsupported: + return false, nil + } + + ref := r.Reference + ref.Reference = zeroDigest + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + + url := buildReferrersURL(r.PlainHTTP, ref, "") + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return false, err + } + resp, err := r.do(req) + if err != nil { + return false, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + supported := resp.Header.Get("Content-Type") == ocispec.MediaTypeImageIndex + r.SetReferrersCapability(supported) + return supported, nil + case http.StatusNotFound: + if err := errutil.ParseErrorResponse(resp); errutil.IsErrorCode(err, errcode.ErrorCodeNameUnknown) { + // repository not found + return false, err + } + r.SetReferrersCapability(false) + return false, nil + default: + return false, errutil.ParseErrorResponse(resp) + } +} + +// delete removes the content identified by the descriptor in the entity "blobs" +// or "manifests". +func (r *Repository) delete(ctx context.Context, target ocispec.Descriptor, isManifest bool) error { + ref := r.Reference + ref.Reference = target.Digest.String() + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionDelete) + buildURL := buildRepositoryBlobURL + if isManifest { + buildURL = buildRepositoryManifestURL + } + url := buildURL(r.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) + if err != nil { + return err + } + + resp, err := r.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusAccepted: + return verifyContentDigest(resp, target.Digest) + case http.StatusNotFound: + return fmt.Errorf("%s: %w", target.Digest, errdef.ErrNotFound) + default: + return errutil.ParseErrorResponse(resp) + } +} + +// blobStore accesses the blob part of the repository. +type blobStore struct { + repo *Repository +} + +// Fetch fetches the content identified by the descriptor. +func (s *blobStore) Fetch(ctx context.Context, target ocispec.Descriptor) (rc io.ReadCloser, err error) { + ref := s.repo.Reference + ref.Reference = target.Digest.String() + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryBlobURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := s.repo.do(req) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + resp.Body.Close() + } + }() + + switch resp.StatusCode { + case http.StatusOK: // server does not support seek as `Range` was ignored. + if size := resp.ContentLength; size != -1 && size != target.Size { + return nil, fmt.Errorf("%s %q: mismatch Content-Length", resp.Request.Method, resp.Request.URL) + } + + // check server range request capability. + // Docker spec allows range header form of "Range: bytes=-". + // However, the remote server may still not be RFC 7233 compliant.
+ // Reference: https://docs.docker.com/registry/spec/api/#blob + if rangeUnit := resp.Header.Get("Accept-Ranges"); rangeUnit == "bytes" { + return httputil.NewReadSeekCloser(s.repo.client(), req, resp.Body, target.Size), nil + } + return resp.Body, nil + case http.StatusNotFound: + return nil, fmt.Errorf("%s: %w", target.Digest, errdef.ErrNotFound) + default: + return nil, errutil.ParseErrorResponse(resp) + } +} + +// Mount mounts the given descriptor from fromRepo into s. +func (s *blobStore) Mount(ctx context.Context, desc ocispec.Descriptor, fromRepo string, getContent func() (io.ReadCloser, error)) error { + // pushing usually requires both pull and push actions. + // Reference: https://github.com/distribution/distribution/blob/v2.7.1/registry/handlers/app.go#L921-L930 + ctx = auth.AppendRepositoryScope(ctx, s.repo.Reference, auth.ActionPull, auth.ActionPush) + + // We also need pull access to the source repo. + fromRef := s.repo.Reference + fromRef.Repository = fromRepo + ctx = auth.AppendRepositoryScope(ctx, fromRef, auth.ActionPull) + + url := buildRepositoryBlobMountURL(s.repo.PlainHTTP, s.repo.Reference, desc.Digest, fromRepo) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + if err != nil { + return err + } + resp, err := s.repo.do(req) + if err != nil { + return err + } + if resp.StatusCode == http.StatusCreated { + defer resp.Body.Close() + // Check the server seems to be behaving. + return verifyContentDigest(resp, desc.Digest) + } + if resp.StatusCode != http.StatusAccepted { + defer resp.Body.Close() + return errutil.ParseErrorResponse(resp) + } + resp.Body.Close() + // From the [spec]: + // + // "If a registry does not support cross-repository mounting + // or is unable to mount the requested blob, + // it SHOULD return a 202. + // This indicates that the upload session has begun + // and that the client MAY proceed with the upload." + // + // So we need to get the content from somewhere in order to + // push it. If the caller has provided a getContent function, we + // can use that, otherwise pull the content from the source repository. + // + // [spec]: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#mounting-a-blob-from-another-repository + + var r io.ReadCloser + if getContent != nil { + r, err = getContent() + } else { + r, err = s.sibling(fromRepo).Fetch(ctx, desc) + } + if err != nil { + return fmt.Errorf("cannot read source blob: %w", err) + } + defer r.Close() + return s.completePushAfterInitialPost(ctx, req, resp, desc, r) +} + +// sibling returns a blob store for another repository in the same +// registry. +func (s *blobStore) sibling(otherRepoName string) *blobStore { + otherRepo := s.repo.clone() + otherRepo.Reference.Repository = otherRepoName + return &blobStore{ + repo: otherRepo, + } +} + +// Push pushes the content, matching the expected descriptor. +// Existing content is not checked by Push() to minimize the number of out-going +// requests. +// Push is done by conventional 2-step monolithic upload instead of a single +// `POST` request for better overall performance. It also allows early fail on +// authentication errors. 
+// +// References: +// - https://docs.docker.com/registry/spec/api/#pushing-an-image +// - https://docs.docker.com/registry/spec/api/#initiate-blob-upload +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pushing-a-blob-monolithically +func (s *blobStore) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { + // start an upload + // pushing usually requires both pull and push actions. + // Reference: https://github.com/distribution/distribution/blob/v2.7.1/registry/handlers/app.go#L921-L930 + ctx = auth.AppendRepositoryScope(ctx, s.repo.Reference, auth.ActionPull, auth.ActionPush) + url := buildRepositoryBlobUploadURL(s.repo.PlainHTTP, s.repo.Reference) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + if err != nil { + return err + } + + resp, err := s.repo.do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusAccepted { + defer resp.Body.Close() + return errutil.ParseErrorResponse(resp) + } + resp.Body.Close() + return s.completePushAfterInitialPost(ctx, req, resp, expected, content) +} + +// completePushAfterInitialPost implements step 2 of the push protocol. This can be invoked either by +// Push or by Mount when the receiving repository does not implement the +// mount endpoint. +func (s *blobStore) completePushAfterInitialPost(ctx context.Context, req *http.Request, resp *http.Response, expected ocispec.Descriptor, content io.Reader) error { + reqHostname := req.URL.Hostname() + reqPort := req.URL.Port() + // monolithic upload + location, err := resp.Location() + if err != nil { + return err + } + // work-around solution for https://github.com/oras-project/oras-go/issues/177 + // For some registries, if the port 443 is explicitly set to the hostname + // like registry.wabbit-networks.io:443/myrepo, blob push will fail since + // the hostname of the Location header in the response is set to + // registry.wabbit-networks.io instead of registry.wabbit-networks.io:443. + locationHostname := location.Hostname() + locationPort := location.Port() + // if location port 443 is missing, add it back + if reqPort == "443" && locationHostname == reqHostname && locationPort == "" { + location.Host = locationHostname + ":" + reqPort + } + url := location.String() + req, err = http.NewRequestWithContext(ctx, http.MethodPut, url, content) + if err != nil { + return err + } + if req.GetBody != nil && req.ContentLength != expected.Size { + // short circuit a size mismatch for built-in types. + return fmt.Errorf("mismatch content length %d: expect %d", req.ContentLength, expected.Size) + } + req.ContentLength = expected.Size + // the expected media type is ignored as in the API doc. + req.Header.Set("Content-Type", "application/octet-stream") + q := req.URL.Query() + q.Set("digest", expected.Digest.String()) + req.URL.RawQuery = q.Encode() + + // reuse credential from previous POST request + if auth := resp.Request.Header.Get("Authorization"); auth != "" { + req.Header.Set("Authorization", auth) + } + resp, err = s.repo.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + return errutil.ParseErrorResponse(resp) + } + return nil +} + +// Exists returns true if the described content exists. 
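A push-side sketch (not part of the diff) of the two-step blob upload implemented above: Push POSTs to start the upload session, then PUTs the content with the expected digest as a query parameter. The blob content and repository name are illustrative.

package main

import (
	"bytes"
	"context"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"oras.land/oras-go/v2/registry/remote"
)

func main() {
	ctx := context.Background()
	repo, err := remote.NewRepository("localhost:5000/hello-world")
	if err != nil {
		panic(err)
	}

	blob := []byte("hello")
	// The expected descriptor carries the digest and size that the registry
	// verifies on upload completion.
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}
	if err := repo.Blobs().Push(ctx, desc, bytes.NewReader(blob)); err != nil {
		panic(err)
	}
}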
+func (s *blobStore) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) { + _, err := s.Resolve(ctx, target.Digest.String()) + if err == nil { + return true, nil + } + if errors.Is(err, errdef.ErrNotFound) { + return false, nil + } + return false, err +} + +// Delete removes the content identified by the descriptor. +func (s *blobStore) Delete(ctx context.Context, target ocispec.Descriptor) error { + return s.repo.delete(ctx, target, false) +} + +// Resolve resolves a reference to a descriptor. +func (s *blobStore) Resolve(ctx context.Context, reference string) (ocispec.Descriptor, error) { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return ocispec.Descriptor{}, err + } + refDigest, err := ref.Digest() + if err != nil { + return ocispec.Descriptor{}, err + } + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryBlobURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + return ocispec.Descriptor{}, err + } + + resp, err := s.repo.do(req) + if err != nil { + return ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return generateBlobDescriptor(resp, refDigest) + case http.StatusNotFound: + return ocispec.Descriptor{}, fmt.Errorf("%s: %w", ref, errdef.ErrNotFound) + default: + return ocispec.Descriptor{}, errutil.ParseErrorResponse(resp) + } +} + +// FetchReference fetches the blob identified by the reference. +// The reference must be a digest. +func (s *blobStore) FetchReference(ctx context.Context, reference string) (desc ocispec.Descriptor, rc io.ReadCloser, err error) { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + refDigest, err := ref.Digest() + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryBlobURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + resp, err := s.repo.do(req) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + defer func() { + if err != nil { + resp.Body.Close() + } + }() + + switch resp.StatusCode { + case http.StatusOK: // server does not support seek as `Range` was ignored. + if resp.ContentLength == -1 { + desc, err = s.Resolve(ctx, reference) + } else { + desc, err = generateBlobDescriptor(resp, refDigest) + } + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + // check server range request capability. + // Docker spec allows range header form of "Range: bytes=-". + // However, the remote server may still not be RFC 7233 compliant. + // Reference: https://docs.docker.com/registry/spec/api/#blob + if rangeUnit := resp.Header.Get("Accept-Ranges"); rangeUnit == "bytes" { + return desc, httputil.NewReadSeekCloser(s.repo.client(), req, resp.Body, desc.Size), nil + } + return desc, resp.Body, nil + case http.StatusNotFound: + return ocispec.Descriptor{}, nil, fmt.Errorf("%s: %w", ref, errdef.ErrNotFound) + default: + return ocispec.Descriptor{}, nil, errutil.ParseErrorResponse(resp) + } +} + +// generateBlobDescriptor returns a descriptor generated from the response.
+func generateBlobDescriptor(resp *http.Response, refDigest digest.Digest) (ocispec.Descriptor, error) { + mediaType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if mediaType == "" { + mediaType = "application/octet-stream" + } + + size := resp.ContentLength + if size == -1 { + return ocispec.Descriptor{}, fmt.Errorf("%s %q: unknown response Content-Length", resp.Request.Method, resp.Request.URL) + } + + if err := verifyContentDigest(resp, refDigest); err != nil { + return ocispec.Descriptor{}, err + } + + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: refDigest, + Size: size, + }, nil +} + +// manifestStore accesses the manifest part of the repository. +type manifestStore struct { + repo *Repository +} + +// Fetch fetches the content identified by the descriptor. +func (s *manifestStore) Fetch(ctx context.Context, target ocispec.Descriptor) (rc io.ReadCloser, err error) { + ref := s.repo.Reference + ref.Reference = target.Digest.String() + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryManifestURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", target.MediaType) + + resp, err := s.repo.do(req) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + resp.Body.Close() + } + }() + + switch resp.StatusCode { + case http.StatusOK: + // no-op + case http.StatusNotFound: + return nil, fmt.Errorf("%s: %w", target.Digest, errdef.ErrNotFound) + default: + return nil, errutil.ParseErrorResponse(resp) + } + mediaType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return nil, fmt.Errorf("%s %q: invalid response Content-Type: %w", resp.Request.Method, resp.Request.URL, err) + } + if mediaType != target.MediaType { + return nil, fmt.Errorf("%s %q: mismatch response Content-Type %q: expect %q", resp.Request.Method, resp.Request.URL, mediaType, target.MediaType) + } + if size := resp.ContentLength; size != -1 && size != target.Size { + return nil, fmt.Errorf("%s %q: mismatch Content-Length", resp.Request.Method, resp.Request.URL) + } + if err := verifyContentDigest(resp, target.Digest); err != nil { + return nil, err + } + return resp.Body, nil +} + +// Push pushes the content, matching the expected descriptor. +func (s *manifestStore) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { + return s.pushWithIndexing(ctx, expected, content, expected.Digest.String()) +} + +// Exists returns true if the described content exists. +func (s *manifestStore) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) { + _, err := s.Resolve(ctx, target.Digest.String()) + if err == nil { + return true, nil + } + if errors.Is(err, errdef.ErrNotFound) { + return false, nil + } + return false, err +} + +// Delete removes the manifest content identified by the descriptor. +func (s *manifestStore) Delete(ctx context.Context, target ocispec.Descriptor) error { + return s.deleteWithIndexing(ctx, target) +} + +// deleteWithIndexing removes the manifest content identified by the descriptor, +// and indexes referrers for the manifest when needed. 
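deleteWithIndexing, defined next, fetches the manifest body just to inspect its subject. The minimal decoding it relies on can be sketched in isolation:

```go
package example

import (
	"encoding/json"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// subjectOf extracts the optional subject descriptor from raw manifest JSON.
// A nil result means the manifest declares no subject and needs no indexing.
func subjectOf(manifestJSON []byte) (*ocispec.Descriptor, error) {
	var m struct {
		Subject *ocispec.Descriptor `json:"subject"`
	}
	if err := json.Unmarshal(manifestJSON, &m); err != nil {
		return nil, err
	}
	return m.Subject, nil
}
```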
+func (s *manifestStore) deleteWithIndexing(ctx context.Context, target ocispec.Descriptor) error { + switch target.MediaType { + case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + if state := s.repo.loadReferrersState(); state == referrersStateSupported { + // referrers API is available, no client-side indexing needed + return s.repo.delete(ctx, target, true) + } + + if err := limitSize(target, s.repo.MaxMetadataBytes); err != nil { + return err + } + ctx = auth.AppendRepositoryScope(ctx, s.repo.Reference, auth.ActionPull, auth.ActionDelete) + manifestJSON, err := content.FetchAll(ctx, s, target) + if err != nil { + return err + } + if err := s.indexReferrersForDelete(ctx, target, manifestJSON); err != nil { + return err + } + } + + return s.repo.delete(ctx, target, true) +} + +// indexReferrersForDelete indexes referrers for manifests with a subject field +// on manifest delete. +// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#deleting-manifests +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#deleting-manifests +func (s *manifestStore) indexReferrersForDelete(ctx context.Context, desc ocispec.Descriptor, manifestJSON []byte) error { + var manifest struct { + Subject *ocispec.Descriptor `json:"subject"` + } + if err := json.Unmarshal(manifestJSON, &manifest); err != nil { + return fmt.Errorf("failed to decode manifest: %s: %s: %w", desc.Digest, desc.MediaType, err) + } + if manifest.Subject == nil { + // no subject, no indexing needed + return nil + } + + subject := *manifest.Subject + ok, err := s.repo.pingReferrers(ctx) + if err != nil { + return err + } + if ok { + // referrers API is available, no client-side indexing needed + return nil + } + return s.updateReferrersIndex(ctx, subject, referrerChange{desc, referrerOperationRemove}) +} + +// Resolve resolves a reference to a descriptor. +// See also `ManifestMediaTypes`. +func (s *manifestStore) Resolve(ctx context.Context, reference string) (ocispec.Descriptor, error) { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return ocispec.Descriptor{}, err + } + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryManifestURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + return ocispec.Descriptor{}, err + } + req.Header.Set("Accept", manifestAcceptHeader(s.repo.ManifestMediaTypes)) + + resp, err := s.repo.do(req) + if err != nil { + return ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return s.generateDescriptor(resp, ref, req.Method) + case http.StatusNotFound: + return ocispec.Descriptor{}, fmt.Errorf("%s: %w", ref, errdef.ErrNotFound) + default: + return ocispec.Descriptor{}, errutil.ParseErrorResponse(resp) + } +} + +// FetchReference fetches the manifest identified by the reference. +// The reference can be a tag or digest. 
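When client-side indexing is needed, the index is stored under the fallback referrers tag. A sketch of that tag schema (the real builder, buildReferrersTag, lives elsewhere in this package; this merely restates the spec's transformation):

```go
package example

import (
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// referrersTag converts a subject digest into the fallback tag defined by the
// OCI distribution spec: "sha256:deadbeef..." becomes "sha256-deadbeef...".
func referrersTag(subject ocispec.Descriptor) string {
	return subject.Digest.Algorithm().String() + "-" + subject.Digest.Encoded()
}
```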
+func (s *manifestStore) FetchReference(ctx context.Context, reference string) (desc ocispec.Descriptor, rc io.ReadCloser, err error) { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull) + url := buildRepositoryManifestURL(s.repo.PlainHTTP, ref) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + req.Header.Set("Accept", manifestAcceptHeader(s.repo.ManifestMediaTypes)) + + resp, err := s.repo.do(req) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + defer func() { + if err != nil { + resp.Body.Close() + } + }() + + switch resp.StatusCode { + case http.StatusOK: + if resp.ContentLength == -1 { + desc, err = s.Resolve(ctx, reference) + } else { + desc, err = s.generateDescriptor(resp, ref, req.Method) + } + if err != nil { + return ocispec.Descriptor{}, nil, err + } + return desc, resp.Body, nil + case http.StatusNotFound: + return ocispec.Descriptor{}, nil, fmt.Errorf("%s: %w", ref, errdef.ErrNotFound) + default: + return ocispec.Descriptor{}, nil, errutil.ParseErrorResponse(resp) + } +} + +// Tag tags a manifest descriptor with a reference string. +func (s *manifestStore) Tag(ctx context.Context, desc ocispec.Descriptor, reference string) error { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return err + } + + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull, auth.ActionPush) + rc, err := s.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return s.push(ctx, desc, rc, ref.Reference) +} + +// PushReference pushes the manifest with a reference tag. +func (s *manifestStore) PushReference(ctx context.Context, expected ocispec.Descriptor, content io.Reader, reference string) error { + ref, err := s.repo.ParseReference(reference) + if err != nil { + return err + } + return s.pushWithIndexing(ctx, expected, content, ref.Reference) +} + +// push pushes the manifest content, matching the expected descriptor. +func (s *manifestStore) push(ctx context.Context, expected ocispec.Descriptor, content io.Reader, reference string) error { + ref := s.repo.Reference + ref.Reference = reference + // pushing usually requires both pull and push actions. + // Reference: https://github.com/distribution/distribution/blob/v2.7.1/registry/handlers/app.go#L921-L930 + ctx = auth.AppendRepositoryScope(ctx, ref, auth.ActionPull, auth.ActionPush) + url := buildRepositoryManifestURL(s.repo.PlainHTTP, ref) + // unwrap the content for optimizations of built-in types. + body := ioutil.UnwrapNopCloser(content) + if _, ok := body.(io.ReadCloser); ok { + // undo unwrap if the nopCloser is intended. + body = content + } + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, body) + if err != nil { + return err + } + if req.GetBody != nil && req.ContentLength != expected.Size { + // short circuit a size mismatch for built-in types. + return fmt.Errorf("mismatch content length %d: expect %d", req.ContentLength, expected.Size) + } + req.ContentLength = expected.Size + req.Header.Set("Content-Type", expected.MediaType) + + // if the underlying client is an auth client, the content might be read + // more than once for obtaining the auth challenge and the actual request. + // To prevent double reading, the manifest is read and stored in memory, + // and served from memory.
+ client := s.repo.client() + if _, ok := client.(*auth.Client); ok && req.GetBody == nil { + store := cas.NewMemory() + err := store.Push(ctx, expected, content) + if err != nil { + return err + } + req.GetBody = func() (io.ReadCloser, error) { + return store.Fetch(ctx, expected) + } + req.Body, err = req.GetBody() + if err != nil { + return err + } + } + resp, err := s.repo.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + return errutil.ParseErrorResponse(resp) + } + s.checkOCISubjectHeader(resp) + return verifyContentDigest(resp, expected.Digest) +} + +// checkOCISubjectHeader checks the "OCI-Subject" header in the response and +// sets referrers capability accordingly. +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pushing-manifests-with-subject +func (s *manifestStore) checkOCISubjectHeader(resp *http.Response) { + // If the "OCI-Subject" header is set, it indicates that the registry + // supports the Referrers API and has processed the subject of the manifest. + if subjectHeader := resp.Header.Get(headerOCISubject); subjectHeader != "" { + s.repo.SetReferrersCapability(true) + } + + // If the "OCI-Subject" header is NOT set, it means that either the manifest + // has no subject OR the referrers API is NOT supported by the registry. + // + // Since we don't know whether the pushed manifest has a subject or not, + // we do not set the referrers capability to false here. +} + +// pushWithIndexing pushes the manifest content matching the expected descriptor, +// and indexes referrers for the manifest when needed. +func (s *manifestStore) pushWithIndexing(ctx context.Context, expected ocispec.Descriptor, r io.Reader, reference string) error { + switch expected.MediaType { + case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + if state := s.repo.loadReferrersState(); state == referrersStateSupported { + // referrers API is available, no client-side indexing needed + return s.push(ctx, expected, r, reference) + } + + if err := limitSize(expected, s.repo.MaxMetadataBytes); err != nil { + return err + } + manifestJSON, err := content.ReadAll(r, expected) + if err != nil { + return err + } + if err := s.push(ctx, expected, bytes.NewReader(manifestJSON), reference); err != nil { + return err + } + // check referrers API availability again after push + if state := s.repo.loadReferrersState(); state == referrersStateSupported { + // the subject has been processed by the registry, no client-side + // indexing needed + return nil + } + return s.indexReferrersForPush(ctx, expected, manifestJSON) + default: + return s.push(ctx, expected, r, reference) + } +} + +// indexReferrersForPush indexes referrers for manifests with a subject field +// on manifest push.
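The GetBody caching in push above generalizes: any replayable request body lets an auth transport resend the request after a 401 challenge. A minimal sketch of the same idea for a byte-slice payload (the helper name is hypothetical):

```go
package example

import (
	"bytes"
	"io"
	"net/http"
)

// replayableBody makes an *http.Request body rewindable so that a retrying or
// authenticating transport can safely send the request more than once.
func replayableBody(req *http.Request, payload []byte) {
	req.Body = io.NopCloser(bytes.NewReader(payload))
	req.ContentLength = int64(len(payload))
	req.GetBody = func() (io.ReadCloser, error) {
		// each call returns a fresh reader over the same payload
		return io.NopCloser(bytes.NewReader(payload)), nil
	}
}
```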
+// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pushing-manifests-with-subject +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pushing-manifests-with-subject +func (s *manifestStore) indexReferrersForPush(ctx context.Context, desc ocispec.Descriptor, manifestJSON []byte) error { + var subject ocispec.Descriptor + switch desc.MediaType { + case spec.MediaTypeArtifactManifest: + var manifest spec.Artifact + if err := json.Unmarshal(manifestJSON, &manifest); err != nil { + return fmt.Errorf("failed to decode manifest: %s: %s: %w", desc.Digest, desc.MediaType, err) + } + if manifest.Subject == nil { + // no subject, no indexing needed + return nil + } + subject = *manifest.Subject + desc.ArtifactType = manifest.ArtifactType + desc.Annotations = manifest.Annotations + case ocispec.MediaTypeImageManifest: + var manifest ocispec.Manifest + if err := json.Unmarshal(manifestJSON, &manifest); err != nil { + return fmt.Errorf("failed to decode manifest: %s: %s: %w", desc.Digest, desc.MediaType, err) + } + if manifest.Subject == nil { + // no subject, no indexing needed + return nil + } + subject = *manifest.Subject + desc.ArtifactType = manifest.ArtifactType + if desc.ArtifactType == "" { + desc.ArtifactType = manifest.Config.MediaType + } + desc.Annotations = manifest.Annotations + case ocispec.MediaTypeImageIndex: + var manifest ocispec.Index + if err := json.Unmarshal(manifestJSON, &manifest); err != nil { + return fmt.Errorf("failed to decode manifest: %s: %s: %w", desc.Digest, desc.MediaType, err) + } + if manifest.Subject == nil { + // no subject, no indexing needed + return nil + } + subject = *manifest.Subject + desc.ArtifactType = manifest.ArtifactType + desc.Annotations = manifest.Annotations + default: + return nil + } + + // if the manifest has a subject but the remote registry does not process it, + // it means that the Referrers API is not supported by the registry. + s.repo.SetReferrersCapability(false) + return s.updateReferrersIndex(ctx, subject, referrerChange{desc, referrerOperationAdd}) +} + +// updateReferrersIndex updates the referrers index for desc referencing subject +// on manifest push and manifest delete. +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#pushing-manifests-with-subject +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#deleting-manifests +func (s *manifestStore) updateReferrersIndex(ctx context.Context, subject ocispec.Descriptor, change referrerChange) (err error) { + referrersTag := buildReferrersTag(subject) + + var oldIndexDesc *ocispec.Descriptor + var oldReferrers []ocispec.Descriptor + prepare := func() error { + // 1. pull the original referrers list using the referrers tag schema + indexDesc, referrers, err := s.repo.referrersFromIndex(ctx, referrersTag) + if err != nil { + if errors.Is(err, errdef.ErrNotFound) { + // valid case: no old referrers index + return nil + } + return err + } + oldIndexDesc = &indexDesc + oldReferrers = referrers + return nil + } + update := func(referrerChanges []referrerChange) error { + // 2. apply the referrer changes on the referrers list + updatedReferrers, err := applyReferrerChanges(oldReferrers, referrerChanges) + if err != nil { + if err == errNoReferrerUpdate { + return nil + } + return err + } + + // 3. 
push the updated referrers list using referrers tag schema + if len(updatedReferrers) > 0 || s.repo.SkipReferrersGC { + // push a new index in either case: + // 1. the referrers list has been updated with a non-zero size + // 2. OR the updated referrers list is empty but referrers GC + // is skipped, in this case an empty index should still be pushed + // as the old index won't get deleted + newIndexDesc, newIndex, err := generateIndex(updatedReferrers) + if err != nil { + return fmt.Errorf("failed to generate referrers index for referrers tag %s: %w", referrersTag, err) + } + if err := s.push(ctx, newIndexDesc, bytes.NewReader(newIndex), referrersTag); err != nil { + return fmt.Errorf("failed to push referrers index tagged by %s: %w", referrersTag, err) + } + } + + // 4. delete the dangling original referrers index, if applicable + if s.repo.SkipReferrersGC || oldIndexDesc == nil { + return nil + } + if err := s.repo.delete(ctx, *oldIndexDesc, true); err != nil { + return &ReferrersError{ + Op: opDeleteReferrersIndex, + Err: fmt.Errorf("failed to delete dangling referrers index %s for referrers tag %s: %w", oldIndexDesc.Digest.String(), referrersTag, err), + Subject: subject, + } + } + return nil + } + + merge, done := s.repo.referrersMergePool.Get(referrersTag) + defer done() + return merge.Do(change, prepare, update) +} + +// ParseReference parses a reference to a fully qualified reference. +func (s *manifestStore) ParseReference(reference string) (registry.Reference, error) { + return s.repo.ParseReference(reference) +} + +// generateDescriptor returns a descriptor generated from the response. +// See the truth table at the top of `repository_test.go` +func (s *manifestStore) generateDescriptor(resp *http.Response, ref registry.Reference, httpMethod string) (ocispec.Descriptor, error) { + // 1. Validate Content-Type + mediaType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf( + "%s %q: invalid response `Content-Type` header; %w", + resp.Request.Method, + resp.Request.URL, + err, + ) + } + + // 2. Validate Size + if resp.ContentLength == -1 { + return ocispec.Descriptor{}, fmt.Errorf( + "%s %q: unknown response Content-Length", + resp.Request.Method, + resp.Request.URL, + ) + } + + // 3. Validate Client Reference + var refDigest digest.Digest + if d, err := ref.Digest(); err == nil { + refDigest = d + } + + // 4. Validate Server Digest (if present) + var serverHeaderDigest digest.Digest + if serverHeaderDigestStr := resp.Header.Get(headerDockerContentDigest); serverHeaderDigestStr != "" { + if serverHeaderDigest, err = digest.Parse(serverHeaderDigestStr); err != nil { + return ocispec.Descriptor{}, fmt.Errorf( + "%s %q: invalid response header value: `%s: %s`; %w", + resp.Request.Method, + resp.Request.URL, + headerDockerContentDigest, + serverHeaderDigestStr, + err, + ) + } + } + + /* 5. 
Now, look for specific error conditions; see truth table in method docstring */ + var contentDigest digest.Digest + + if len(serverHeaderDigest) == 0 { + if httpMethod == http.MethodHead { + if len(refDigest) == 0 { + // HEAD without server `Docker-Content-Digest` header is an + // immediate fail + return ocispec.Descriptor{}, fmt.Errorf( + "HTTP %s request missing required header %q", + httpMethod, headerDockerContentDigest, + ) + } + // Otherwise, just trust the client-supplied digest + contentDigest = refDigest + } else { + // GET without server `Docker-Content-Digest` header forces the + // expensive calculation + var calculatedDigest digest.Digest + if calculatedDigest, err = calculateDigestFromResponse(resp, s.repo.MaxMetadataBytes); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to calculate digest on response body; %w", err) + } + contentDigest = calculatedDigest + } + } else { + contentDigest = serverHeaderDigest + } + + if len(refDigest) > 0 && refDigest != contentDigest { + return ocispec.Descriptor{}, fmt.Errorf( + "%s %q: invalid response; digest mismatch in %s: received %q when expecting %q", + resp.Request.Method, resp.Request.URL, + headerDockerContentDigest, contentDigest, + refDigest, + ) + } + + // 6. Finally, if we made it this far, then all is good; return. + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: contentDigest, + Size: resp.ContentLength, + }, nil +} + +// calculateDigestFromResponse calculates the actual digest of the response body +// taking care not to destroy it in the process. +func calculateDigestFromResponse(resp *http.Response, maxMetadataBytes int64) (digest.Digest, error) { + defer resp.Body.Close() + + body := limitReader(resp.Body, maxMetadataBytes) + content, err := io.ReadAll(body) + if err != nil { + return "", fmt.Errorf("%s %q: failed to read response body: %w", resp.Request.Method, resp.Request.URL, err) + } + resp.Body = io.NopCloser(bytes.NewReader(content)) + + return digest.FromBytes(content), nil +} + +// verifyContentDigest verifies "Docker-Content-Digest" header if present. +// OCI distribution-spec states the Docker-Content-Digest header is optional. +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.0.1/spec.md#legacy-docker-support-http-headers +func verifyContentDigest(resp *http.Response, expected digest.Digest) error { + digestStr := resp.Header.Get(headerDockerContentDigest) + + if len(digestStr) == 0 { + return nil + } + + contentDigest, err := digest.Parse(digestStr) + if err != nil { + return fmt.Errorf( + "%s %q: invalid response header: `%s: %s`", + resp.Request.Method, resp.Request.URL, + headerDockerContentDigest, digestStr, + ) + } + + if contentDigest != expected { + return fmt.Errorf( + "%s %q: invalid response; digest mismatch in %s: received %q when expecting %q", + resp.Request.Method, resp.Request.URL, + headerDockerContentDigest, contentDigest, + expected, + ) + } + + return nil +} + +// generateIndex generates an image index containing the given manifests list. +func generateIndex(manifests []ocispec.Descriptor) (ocispec.Descriptor, []byte, error) { + if manifests == nil { + manifests = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } + index := ocispec.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. 
does not pertain to OCI or docker version + }, + MediaType: ocispec.MediaTypeImageIndex, + Manifests: manifests, + } + indexJSON, err := json.Marshal(index) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + indexDesc := content.NewDescriptorFromBytes(index.MediaType, indexJSON) + return indexDesc, indexJSON, nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/retry/client.go b/vendor/oras.land/oras-go/v2/registry/remote/retry/client.go new file mode 100644 index 000000000000..5e986ea02a1d --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/retry/client.go @@ -0,0 +1,114 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "net/http" + "time" +) + +// DefaultClient is a client with the default retry policy. +var DefaultClient = NewClient() + +// NewClient creates an HTTP client with the default retry policy. +func NewClient() *http.Client { + return &http.Client{ + Transport: NewTransport(nil), + } +} + +// Transport is an HTTP transport with retry policy. +type Transport struct { + // Base is the underlying HTTP transport to use. + // If nil, http.DefaultTransport is used for round trips. + Base http.RoundTripper + + // Policy returns a retry Policy to use for the request. + // If nil, DefaultPolicy is used to determine if the request should be retried. + Policy func() Policy +} + +// NewTransport creates an HTTP Transport with the default retry policy. +func NewTransport(base http.RoundTripper) *Transport { + return &Transport{ + Base: base, + } +} + +// RoundTrip executes a single HTTP transaction, returning a Response for the +// provided Request. +// It relies on the configured Policy to determine if the request should be +// retried and to backoff. 
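A usage sketch for this transport; the base transport settings and the policy parameters below are arbitrary, and GenericPolicy with its fields is defined in policy.go, which follows:

```go
package example

import (
	"net/http"
	"time"

	"oras.land/oras-go/v2/registry/remote/retry"
)

// newRetryingClient wraps a custom base transport with the default policy.
func newRetryingClient() *http.Client {
	base := &http.Transport{IdleConnTimeout: 30 * time.Second}
	return &http.Client{Transport: retry.NewTransport(base)}
}

// patientClient swaps in a slower, more persistent policy built from the
// exported pieces of this package.
func patientClient() *http.Client {
	t := &retry.Transport{
		Policy: func() retry.Policy {
			return &retry.GenericPolicy{
				Retryable: retry.DefaultPredicate,
				Backoff:   retry.ExponentialBackoff(500*time.Millisecond, 2, 0.2),
				MinWait:   500 * time.Millisecond,
				MaxWait:   30 * time.Second,
				MaxRetry:  10,
			}
		},
	}
	return &http.Client{Transport: t}
}
```

retry.DefaultClient can be used directly when no custom base transport or policy is needed.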
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + policy := t.policy() + attempt := 0 + for { + resp, respErr := t.roundTrip(req) + duration, err := policy.Retry(attempt, resp, respErr) + if err != nil { + if respErr == nil { + resp.Body.Close() + } + return nil, err + } + if duration < 0 { + return resp, respErr + } + + // rewind the body if possible + if req.Body != nil { + if req.GetBody == nil { + // body can't be rewound, so we can't retry + return resp, respErr + } + body, err := req.GetBody() + if err != nil { + // failed to rewind the body, so we can't retry + return resp, respErr + } + req.Body = body + } + + // close the response body if needed + if respErr == nil { + resp.Body.Close() + } + + timer := time.NewTimer(duration) + select { + case <-ctx.Done(): + timer.Stop() + return nil, ctx.Err() + case <-timer.C: + } + attempt++ + } +} + +func (t *Transport) roundTrip(req *http.Request) (*http.Response, error) { + if t.Base == nil { + return http.DefaultTransport.RoundTrip(req) + } + return t.Base.RoundTrip(req) +} + +func (t *Transport) policy() Policy { + if t.Policy == nil { + return DefaultPolicy + } + return t.Policy() +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go b/vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go new file mode 100644 index 000000000000..fe7fadee74a9 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go @@ -0,0 +1,154 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "hash/maphash" + "math" + "math/rand" + "net" + "net/http" + "strconv" + "time" +) + +// headerRetryAfter is the header key for Retry-After. +const headerRetryAfter = "Retry-After" + +// DefaultPolicy is a policy with fine-tuned retry parameters. +// It uses an exponential backoff with jitter. +var DefaultPolicy Policy = &GenericPolicy{ + Retryable: DefaultPredicate, + Backoff: DefaultBackoff, + MinWait: 200 * time.Millisecond, + MaxWait: 3 * time.Second, + MaxRetry: 5, +} + +// DefaultPredicate is a predicate that retries on 5xx errors, 429 Too Many +// Requests, 408 Request Timeout and on network dial timeout. +var DefaultPredicate Predicate = func(resp *http.Response, err error) (bool, error) { + if err != nil { + // retry on Dial timeout + if err, ok := err.(net.Error); ok && err.Timeout() { + return true, nil + } + return false, err + } + + if resp.StatusCode == http.StatusRequestTimeout || resp.StatusCode == http.StatusTooManyRequests { + return true, nil + } + + if resp.StatusCode == 0 || resp.StatusCode >= 500 { + return true, nil + } + + return false, nil +} + +// DefaultBackoff is a backoff that uses an exponential backoff with jitter. +// It uses a base of 250ms, a factor of 2 and a jitter of 10%. +var DefaultBackoff Backoff = ExponentialBackoff(250*time.Millisecond, 2, 0.1) + +// Policy is a retry policy. +type Policy interface { + // Retry returns the duration to wait before retrying the request. 
+ // It returns a negative value if the request should not be retried. + // The attempt is used to: + // - calculate the backoff duration; the default backoff is an exponential backoff. + // - determine if the request should be retried. + // The attempt starts at 0 and should be less than MaxRetry for the request to + // be retried. + Retry(attempt int, resp *http.Response, err error) (time.Duration, error) +} + +// Predicate is a function that returns true if the request should be retried. +type Predicate func(resp *http.Response, err error) (bool, error) + +// Backoff is a function that returns the duration to wait before retrying the +// request. The attempt is the next attempt number. The response is the +// response from the previous request. +type Backoff func(attempt int, resp *http.Response) time.Duration + +// ExponentialBackoff returns a Backoff that uses an exponential backoff with +// jitter. The backoff is calculated as: +// +// temp = backoff * factor ^ attempt +// interval = temp * (1 - jitter) + rand.Int63n(2 * jitter * temp) +// +// The HTTP response is checked for a Retry-After header. If it is present, the +// value is used as the backoff duration. +func ExponentialBackoff(backoff time.Duration, factor, jitter float64) Backoff { + return func(attempt int, resp *http.Response) time.Duration { + var h maphash.Hash + h.SetSeed(maphash.MakeSeed()) + rand := rand.New(rand.NewSource(int64(h.Sum64()))) + + // check Retry-After + if resp != nil && resp.StatusCode == http.StatusTooManyRequests { + if v := resp.Header.Get(headerRetryAfter); v != "" { + if retryAfter, _ := strconv.ParseInt(v, 10, 64); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } + } + } + + // do exponential backoff with jitter + temp := float64(backoff) * math.Pow(factor, float64(attempt)) + return time.Duration(temp*(1-jitter)) + time.Duration(rand.Int63n(int64(2*jitter*temp))) + } +} + +// GenericPolicy is a generic retry policy. +type GenericPolicy struct { + // Retryable is a predicate that returns true if the request should be + // retried. + Retryable Predicate + + // Backoff is a function that returns the duration to wait before retrying. + Backoff Backoff + + // MinWait is the minimum duration to wait before retrying. + MinWait time.Duration + + // MaxWait is the maximum duration to wait before retrying. + MaxWait time.Duration + + // MaxRetry is the maximum number of retries. + MaxRetry int +} + +// Retry returns the duration to wait before retrying the request. +// It returns -1 if the request should not be retried. +func (p *GenericPolicy) Retry(attempt int, resp *http.Response, err error) (time.Duration, error) { + if attempt >= p.MaxRetry { + return -1, nil + } + if ok, err := p.Retryable(resp, err); err != nil { + return -1, err + } else if !ok { + return -1, nil + } + backoff := p.Backoff(attempt, resp) + if backoff < p.MinWait { + backoff = p.MinWait + } + if backoff > p.MaxWait { + backoff = p.MaxWait + } + return backoff, nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/url.go b/vendor/oras.land/oras-go/v2/registry/remote/url.go new file mode 100644 index 000000000000..2d4b422b3329 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/url.go @@ -0,0 +1,119 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "fmt" + "net/url" + "strings" + + "github.com/opencontainers/go-digest" + "oras.land/oras-go/v2/registry" +) + +// buildScheme returns HTTP scheme used to access the remote registry. +func buildScheme(plainHTTP bool) string { + if plainHTTP { + return "http" + } + return "https" +} + +// buildRegistryBaseURL builds the URL for accessing the base API. +// Format: <scheme>://<registry>/v2/ +// Reference: https://docs.docker.com/registry/spec/api/#base +func buildRegistryBaseURL(plainHTTP bool, ref registry.Reference) string { + return fmt.Sprintf("%s://%s/v2/", buildScheme(plainHTTP), ref.Host()) +} + +// buildRegistryCatalogURL builds the URL for accessing the catalog API. +// Format: <scheme>://<registry>/v2/_catalog +// Reference: https://docs.docker.com/registry/spec/api/#catalog +func buildRegistryCatalogURL(plainHTTP bool, ref registry.Reference) string { + return fmt.Sprintf("%s://%s/v2/_catalog", buildScheme(plainHTTP), ref.Host()) +} + +// buildRepositoryBaseURL builds the base endpoint of the remote repository. +// Format: <scheme>://<registry>/v2/<repository> +func buildRepositoryBaseURL(plainHTTP bool, ref registry.Reference) string { + return fmt.Sprintf("%s://%s/v2/%s", buildScheme(plainHTTP), ref.Host(), ref.Repository) +} + +// buildRepositoryTagListURL builds the URL for accessing the tag list API. +// Format: <scheme>://<registry>/v2/<repository>/tags/list +// Reference: https://docs.docker.com/registry/spec/api/#tags +func buildRepositoryTagListURL(plainHTTP bool, ref registry.Reference) string { + return buildRepositoryBaseURL(plainHTTP, ref) + "/tags/list" +} + +// buildRepositoryManifestURL builds the URL for accessing the manifest API. +// Format: <scheme>://<registry>/v2/<repository>/manifests/<digest_or_tag> +// Reference: https://docs.docker.com/registry/spec/api/#manifest +func buildRepositoryManifestURL(plainHTTP bool, ref registry.Reference) string { + return strings.Join([]string{ + buildRepositoryBaseURL(plainHTTP, ref), + "manifests", + ref.Reference, + }, "/") +} + +// buildRepositoryBlobURL builds the URL for accessing the blob API. +// Format: <scheme>://<registry>/v2/<repository>/blobs/<digest> +// Reference: https://docs.docker.com/registry/spec/api/#blob +func buildRepositoryBlobURL(plainHTTP bool, ref registry.Reference) string { + return strings.Join([]string{ + buildRepositoryBaseURL(plainHTTP, ref), + "blobs", + ref.Reference, + }, "/") +} + +// buildRepositoryBlobUploadURL builds the URL for blob uploading. +// Format: <scheme>://<registry>/v2/<repository>/blobs/uploads/ +// Reference: https://docs.docker.com/registry/spec/api/#initiate-blob-upload +func buildRepositoryBlobUploadURL(plainHTTP bool, ref registry.Reference) string { + return buildRepositoryBaseURL(plainHTTP, ref) + "/blobs/uploads/" +} + +// buildRepositoryBlobMountURL builds the URL for cross-repository mounting. +// Format: <scheme>://<registry>/v2/<repository>/blobs/uploads/?mount=<digest>&from=<other_repository> +// Reference: https://docs.docker.com/registry/spec/api/#blob +func buildRepositoryBlobMountURL(plainHTTP bool, ref registry.Reference, d digest.Digest, fromRepo string) string { + return fmt.Sprintf("%s?mount=%s&from=%s", + buildRepositoryBlobUploadURL(plainHTTP, ref), + d, + fromRepo, + ) +} + +// buildReferrersURL builds the URL for querying the Referrers API.
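These builders are unexported helpers; for orientation, the manifest URL shape they produce can be reproduced with plain fmt. Host and names in this sketch are placeholders:

```go
package example

import "fmt"

// manifestURL mirrors the manifest endpoint layout:
// <scheme>://<registry>/v2/<repository>/manifests/<digest_or_tag>
func manifestURL(plainHTTP bool, registry, repository, reference string) string {
	scheme := "https"
	if plainHTTP {
		scheme = "http"
	}
	return fmt.Sprintf("%s://%s/v2/%s/manifests/%s", scheme, registry, repository, reference)
}
```

For example, manifestURL(false, "registry.example.com", "myrepo", "v1.0.0") yields "https://registry.example.com/v2/myrepo/manifests/v1.0.0".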
+// Format: <scheme>://<registry>/v2/<repository>/referrers/<digest>?artifactType=<artifactType> +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +func buildReferrersURL(plainHTTP bool, ref registry.Reference, artifactType string) string { + var query string + if artifactType != "" { + v := url.Values{} + v.Set("artifactType", artifactType) + query = "?" + v.Encode() + } + + return fmt.Sprintf( + "%s/referrers/%s%s", + buildRepositoryBaseURL(plainHTTP, ref), + ref.Reference, + query, + ) +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/utils.go b/vendor/oras.land/oras-go/v2/registry/remote/utils.go new file mode 100644 index 000000000000..797169f48f78 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/utils.go @@ -0,0 +1,94 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" +) + +// defaultMaxMetadataBytes specifies the default limit on how many response +// bytes are allowed in the server's response to the metadata APIs. +// See also: Repository.MaxMetadataBytes +var defaultMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB + +// errNoLink is returned by parseLink() when no Link header is present. +var errNoLink = errors.New("no Link header in response") + +// parseLink returns the URL of the response's "Link" header, if present. +func parseLink(resp *http.Response) (string, error) { + link := resp.Header.Get("Link") + if link == "" { + return "", errNoLink + } + if link[0] != '<' { + return "", fmt.Errorf("invalid next link %q: missing '<'", link) + } + if i := strings.IndexByte(link, '>'); i == -1 { + return "", fmt.Errorf("invalid next link %q: missing '>'", link) + } else { + link = link[1:i] + } + + linkURL, err := resp.Request.URL.Parse(link) + if err != nil { + return "", err + } + return linkURL.String(), nil +} + +// limitReader returns a Reader that reads from r but stops with EOF after n +// bytes. If n is less than or equal to zero, defaultMaxMetadataBytes is used. +func limitReader(r io.Reader, n int64) io.Reader { + if n <= 0 { + n = defaultMaxMetadataBytes + } + return io.LimitReader(r, n) +} + +// limitSize returns ErrSizeExceedsLimit if the size of desc exceeds the limit n. +// If n is less than or equal to zero, defaultMaxMetadataBytes is used. +func limitSize(desc ocispec.Descriptor, n int64) error { + if n <= 0 { + n = defaultMaxMetadataBytes + } + if desc.Size > n { + return fmt.Errorf( + "content size %v exceeds MaxMetadataBytes %v: %w", + desc.Size, + n, + errdef.ErrSizeExceedsLimit) + } + return nil +} + +// decodeJSON safely reads the JSON content described by desc, and +// decodes it into v.
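parseLink is the piece that enables pagination loops such as the tag list walk. A simplified standalone sketch, assuming absolute Link URLs, no authentication, and a placeholder endpoint:

```go
package example

import (
	"encoding/json"
	"net/http"
	"strings"
)

// allTags walks /v2/<repo>/tags/list, following Link headers of the form
// <next-url>; rel="next" until the server stops paginating.
func allTags(client *http.Client, url string) ([]string, error) {
	var tags []string
	for url != "" {
		resp, err := client.Get(url)
		if err != nil {
			return nil, err
		}
		var page struct {
			Tags []string `json:"tags"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
		tags = append(tags, page.Tags...)

		url = "" // stop unless a next link is present
		if link := resp.Header.Get("Link"); strings.HasPrefix(link, "<") {
			if end := strings.IndexByte(link, '>'); end > 0 {
				url = link[1:end]
			}
		}
	}
	return tags, nil
}
```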
+func decodeJSON(r io.Reader, desc ocispec.Descriptor, v any) error { + jsonBytes, err := content.ReadAll(r, desc) + if err != nil { + return err + } + return json.Unmarshal(jsonBytes, v) +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/warning.go b/vendor/oras.land/oras-go/v2/registry/remote/warning.go new file mode 100644 index 000000000000..20f5071f3f3a --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/warning.go @@ -0,0 +1,100 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + // headerWarning is the "Warning" header. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + headerWarning = "Warning" + + // warnCode299 is the 299 warn-code. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + warnCode299 = 299 + + // warnAgentUnknown represents an unknown warn-agent. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + warnAgentUnknown = "-" +) + +// errUnexpectedWarningFormat is returned by parseWarningHeader when +// an unexpected warning format is encountered. +var errUnexpectedWarningFormat = errors.New("unexpected warning format") + +// WarningValue represents the value of the Warning header. +// +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#warnings +// - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 +type WarningValue struct { + // Code is the warn-code. + Code int + // Agent is the warn-agent. + Agent string + // Text is the warn-text. + Text string +} + +// Warning contains the value of the warning header and may contain +// other information related to the warning. +// +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#warnings +// - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 +type Warning struct { + // WarningValue is the value of the warning header. + WarningValue +} + +// parseWarningHeader parses the warning header into WarningValue. +func parseWarningHeader(header string) (WarningValue, error) { + if len(header) < 9 || !strings.HasPrefix(header, `299 - "`) || !strings.HasSuffix(header, `"`) { + // minimum header value: `299 - "x"` + return WarningValue{}, fmt.Errorf("%s: %w", header, errUnexpectedWarningFormat) + } + + // validate text only as code and agent are fixed + quotedText := header[6:] // behind `299 - `, quoted by " + text, err := strconv.Unquote(quotedText) + if err != nil { + return WarningValue{}, fmt.Errorf("%s: unexpected text: %w: %v", header, errUnexpectedWarningFormat, err) + } + + return WarningValue{ + Code: warnCode299, + Agent: warnAgentUnknown, + Text: text, + }, nil +} + +// handleWarningHeaders parses the warning headers and handles the parsed +// warnings using handleWarning. 
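The accepted header shape is deliberately narrow: fixed warn-code 299, unknown warn-agent "-", and a quoted warn-text. A standalone restatement of the same check, with a made-up header value as the usage example:

```go
package example

import (
	"fmt"
	"strconv"
	"strings"
)

// warnText extracts the warn-text from a `299 - "<text>"` header value,
// e.g. warnText(`299 - "this tag is deprecated"`) returns the inner text.
func warnText(header string) (string, error) {
	if !strings.HasPrefix(header, `299 - "`) || !strings.HasSuffix(header, `"`) {
		return "", fmt.Errorf("unexpected warning format: %s", header)
	}
	// strconv.Unquote validates and strips the surrounding quotes
	return strconv.Unquote(header[6:])
}
```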
+func handleWarningHeaders(headers []string, handleWarning func(Warning)) { + for _, h := range headers { + if value, err := parseWarningHeader(h); err == nil { + // ignore warnings in unexpected formats + handleWarning(Warning{ + WarningValue: value, + }) + } + } +} diff --git a/vendor/oras.land/oras-go/v2/registry/repository.go b/vendor/oras.land/oras-go/v2/registry/repository.go new file mode 100644 index 000000000000..84a50e2af28a --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/repository.go @@ -0,0 +1,226 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "encoding/json" + "fmt" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "oras.land/oras-go/v2/content" + "oras.land/oras-go/v2/errdef" + "oras.land/oras-go/v2/internal/descriptor" + "oras.land/oras-go/v2/internal/spec" +) + +// Repository is an ORAS target and a union of the blob and the manifest CASs. +// +// As specified by https://docs.docker.com/registry/spec/api/, it is natural to +// assume that content.Resolver interface only works for manifests. Tagging a +// blob may result in an `ErrUnsupported` error. However, this interface +// does not restrict tagging blobs. +// +// Since a repository is a union of the blob and the manifest CASs, all +// operations defined in the `BlobStore` are executed depending on the media +// type of the given descriptor. +// +// Furthermore, this interface also provides the ability to enforce the +// separation of the blob and the manifest CASs. +type Repository interface { + content.Storage + content.Deleter + content.TagResolver + ReferenceFetcher + ReferencePusher + ReferrerLister + TagLister + + // Blobs provides access to the blob CAS only, which contains config blobs, + // layers, and other generic blobs. + Blobs() BlobStore + + // Manifests provides access to the manifest CAS only. + Manifests() ManifestStore +} + +// BlobStore is a CAS with the ability to stat and delete its content. +type BlobStore interface { + content.Storage + content.Deleter + content.Resolver + ReferenceFetcher +} + +// ManifestStore is a CAS with the ability to stat and delete its content. +// Besides, ManifestStore provides reference tagging. +type ManifestStore interface { + BlobStore + content.Tagger + ReferencePusher +} + +// ReferencePusher provides advanced push with the tag service. +type ReferencePusher interface { + // PushReference pushes the manifest with a reference tag. + PushReference(ctx context.Context, expected ocispec.Descriptor, content io.Reader, reference string) error +} + +// ReferenceFetcher provides advanced fetch with the tag service. +type ReferenceFetcher interface { + // FetchReference fetches the content identified by the reference. + FetchReference(ctx context.Context, reference string) (ocispec.Descriptor, io.ReadCloser, error) +} + +// ReferrerLister provides the Referrers API.
+// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +type ReferrerLister interface { + Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error +} + +// TagLister lists tags by the tag service. +type TagLister interface { + // Tags lists the tags available in the repository. + // Since the returned tag list may be paginated by the underlying + // implementation, a function should be passed in to process the paginated + // tag list. + // + // The `last` argument is the `last` parameter when invoking the tags API. + // If `last` is NOT empty, the entries in the response start after the + // tag specified by `last`. Otherwise, the response starts from the top + // of the Tags list. + // + // Note: When implemented by a remote registry, the tags API is called. + // However, not all registries support pagination or conform to the + // specification. + // + // References: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#content-discovery + // - https://docs.docker.com/registry/spec/api/#tags + // See also `Tags()` in this package. + Tags(ctx context.Context, last string, fn func(tags []string) error) error +} + +// Mounter allows cross-repository blob mounts. +// For backward compatibility reasons, this is not implemented by +// BlobStore: use a type assertion to check availability. +type Mounter interface { + // Mount makes the blob with the given descriptor in fromRepo + // available in the repository signified by the receiver. + Mount(ctx context.Context, + desc ocispec.Descriptor, + fromRepo string, + getContent func() (io.ReadCloser, error), + ) error +} + +// Tags lists the tags available in the repository. +func Tags(ctx context.Context, repo TagLister) ([]string, error) { + var res []string + if err := repo.Tags(ctx, "", func(tags []string) error { + res = append(res, tags...) + return nil + }); err != nil { + return nil, err + } + return res, nil +} + +// Referrers lists the descriptors of image or artifact manifests directly +// referencing the given manifest descriptor. +// +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers +func Referrers(ctx context.Context, store content.ReadOnlyGraphStorage, desc ocispec.Descriptor, artifactType string) ([]ocispec.Descriptor, error) { + if !descriptor.IsManifest(desc) { + return nil, fmt.Errorf("the descriptor %v is not a manifest: %w", desc, errdef.ErrUnsupported) + } + + var results []ocispec.Descriptor + + // use the Referrers API if it is available + if rf, ok := store.(ReferrerLister); ok { + if err := rf.Referrers(ctx, desc, artifactType, func(referrers []ocispec.Descriptor) error { + results = append(results, referrers...)
+ return nil + }); err != nil { + return nil, err + } + return results, nil + } + + predecessors, err := store.Predecessors(ctx, desc) + if err != nil { + return nil, err + } + for _, node := range predecessors { + switch node.MediaType { + case ocispec.MediaTypeImageManifest: + fetched, err := content.FetchAll(ctx, store, node) + if err != nil { + return nil, err + } + var manifest ocispec.Manifest + if err := json.Unmarshal(fetched, &manifest); err != nil { + return nil, err + } + if manifest.Subject == nil || !content.Equal(*manifest.Subject, desc) { + continue + } + node.ArtifactType = manifest.ArtifactType + if node.ArtifactType == "" { + node.ArtifactType = manifest.Config.MediaType + } + node.Annotations = manifest.Annotations + case ocispec.MediaTypeImageIndex: + fetched, err := content.FetchAll(ctx, store, node) + if err != nil { + return nil, err + } + var index ocispec.Index + if err := json.Unmarshal(fetched, &index); err != nil { + return nil, err + } + if index.Subject == nil || !content.Equal(*index.Subject, desc) { + continue + } + node.ArtifactType = index.ArtifactType + node.Annotations = index.Annotations + case spec.MediaTypeArtifactManifest: + fetched, err := content.FetchAll(ctx, store, node) + if err != nil { + return nil, err + } + var artifact spec.Artifact + if err := json.Unmarshal(fetched, &artifact); err != nil { + return nil, err + } + if artifact.Subject == nil || !content.Equal(*artifact.Subject, desc) { + continue + } + node.ArtifactType = artifact.ArtifactType + node.Annotations = artifact.Annotations + default: + continue + } + if artifactType == "" || artifactType == node.ArtifactType { + // the field artifactType in referrers descriptor is allowed to be empty + // https://github.com/opencontainers/distribution-spec/issues/458 + results = append(results, node) + } + } + return results, nil +} diff --git a/vendor/oras.land/oras-go/v2/target.go b/vendor/oras.land/oras-go/v2/target.go new file mode 100644 index 000000000000..c6dcaef9bab5 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/target.go @@ -0,0 +1,43 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oras + +import "oras.land/oras-go/v2/content" + +// Target is a CAS with generic tags. +type Target interface { + content.Storage + content.TagResolver +} + +// GraphTarget is a CAS with generic tags that supports direct predecessor node +// finding. +type GraphTarget interface { + content.GraphStorage + content.TagResolver +} + +// ReadOnlyTarget represents a read-only Target. +type ReadOnlyTarget interface { + content.ReadOnlyStorage + content.Resolver +} + +// ReadOnlyGraphTarget represents a read-only GraphTarget. 
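These Target interfaces are what the copy machinery consumes; for instance, a remote repository and an in-memory store can both act as graph targets. A usage sketch, with the repository name and tag as placeholders:

```go
package example

import (
	"context"

	"oras.land/oras-go/v2"
	"oras.land/oras-go/v2/content/memory"
	"oras.land/oras-go/v2/registry/remote"
)

// pullToMemory copies the graph rooted at tag from a remote repository
// (a GraphTarget) into a memory store (also a GraphTarget).
func pullToMemory(ctx context.Context, repoRef, tag string) error {
	src, err := remote.NewRepository(repoRef) // e.g. "registry.example.com/myrepo"
	if err != nil {
		return err
	}
	dst := memory.New()
	_, err = oras.Copy(ctx, src, tag, dst, tag, oras.DefaultCopyOptions)
	return err
}
```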
+type ReadOnlyGraphTarget interface { + content.ReadOnlyGraphStorage + content.Resolver +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md new file mode 100644 index 000000000000..318a00923bc8 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md @@ -0,0 +1,70 @@ +# goyaml.v3 + +This package provides type and function aliases for the `go.yaml.in/yaml/v3` package (which is compatible with `gopkg.in/yaml.v3`). + +## Purpose + +The purpose of this package is to: + +1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v3 +2. Maintain compatibility with existing code while encouraging migration to the upstream package +3. Reduce maintenance overhead by delegating to the upstream implementation + +## Usage + +Instead of importing this package directly, you should migrate to using `go.yaml.in/yaml/v3` directly: + +```go +// Old way +import "sigs.k8s.io/yaml/goyaml.v3" + +// Recommended way +import "go.yaml.in/yaml/v3" +``` + +## Available Types and Functions + +All public types and functions from `go.yaml.in/yaml/v3` are available through this package: + +### Types + +- `Unmarshaler` - Interface for custom unmarshaling behavior +- `Marshaler` - Interface for custom marshaling behavior +- `IsZeroer` - Interface to check if an object is zero +- `Decoder` - Reads and decodes YAML values from an input stream +- `Encoder` - Writes YAML values to an output stream +- `TypeError` - Error returned by Unmarshal for decoding issues +- `Node` - Represents a YAML node in the document +- `Kind` - Represents the kind of a YAML node +- `Style` - Represents the style of a YAML node + +### Functions + +- `Unmarshal` - Decodes YAML data into a Go value +- `Marshal` - Serializes a Go value into YAML +- `NewDecoder` - Creates a new Decoder +- `NewEncoder` - Creates a new Encoder + +## Migration Guide + +To migrate from this package to `go.yaml.in/yaml/v3`: + +1. Update your import statements: + ```go + // From + import "sigs.k8s.io/yaml/goyaml.v3" + + // To + import "go.yaml.in/yaml/v3" + ``` + +2. No code changes should be necessary as the API is identical + +3. Update your go.mod file to include the dependency: + ``` + require go.yaml.in/yaml/v3 v3.0.3 + ``` + +## Deprecation Notice + +All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v3` directly. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go new file mode 100644 index 000000000000..8826ffefeb55 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go @@ -0,0 +1,130 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + gopkg_yaml "go.yaml.in/yaml/v3" +) + +// Type aliases for public types from go.yaml.in/yaml/v3 +type ( + // Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document. 
+ // Deprecated: Use go.yaml.in/yaml/v3.Unmarshaler directly. + Unmarshaler = gopkg_yaml.Unmarshaler + + // Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v3.Marshaler directly. + Marshaler = gopkg_yaml.Marshaler + + // IsZeroer is used to check whether an object is zero to determine whether it should be omitted when + // marshaling with the omitempty flag. One notable implementation is time.Time. + // Deprecated: Use go.yaml.in/yaml/v3.IsZeroer directly. + IsZeroer = gopkg_yaml.IsZeroer + + // Decoder reads and decodes YAML values from an input stream. + // Deprecated: Use go.yaml.in/yaml/v3.Decoder directly. + Decoder = gopkg_yaml.Decoder + + // Encoder writes YAML values to an output stream. + // Deprecated: Use go.yaml.in/yaml/v3.Encoder directly. + Encoder = gopkg_yaml.Encoder + + // TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded. + // Deprecated: Use go.yaml.in/yaml/v3.TypeError directly. + TypeError = gopkg_yaml.TypeError + + // Node represents a YAML node in the document. + // Deprecated: Use go.yaml.in/yaml/v3.Node directly. + Node = gopkg_yaml.Node + + // Kind represents the kind of a YAML node. + // Deprecated: Use go.yaml.in/yaml/v3.Kind directly. + Kind = gopkg_yaml.Kind + + // Style represents the style of a YAML node. + // Deprecated: Use go.yaml.in/yaml/v3.Style directly. + Style = gopkg_yaml.Style +) + +// Constants for Kind type from go.yaml.in/yaml/v3 +const ( + // DocumentNode represents a YAML document node. + // Deprecated: Use go.yaml.in/yaml/v3.DocumentNode directly. + DocumentNode = gopkg_yaml.DocumentNode + + // SequenceNode represents a YAML sequence node. + // Deprecated: Use go.yaml.in/yaml/v3.SequenceNode directly. + SequenceNode = gopkg_yaml.SequenceNode + + // MappingNode represents a YAML mapping node. + // Deprecated: Use go.yaml.in/yaml/v3.MappingNode directly. + MappingNode = gopkg_yaml.MappingNode + + // ScalarNode represents a YAML scalar node. + // Deprecated: Use go.yaml.in/yaml/v3.ScalarNode directly. + ScalarNode = gopkg_yaml.ScalarNode + + // AliasNode represents a YAML alias node. + // Deprecated: Use go.yaml.in/yaml/v3.AliasNode directly. + AliasNode = gopkg_yaml.AliasNode +) + +// Constants for Style type from go.yaml.in/yaml/v3 +const ( + // TaggedStyle represents a tagged YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.TaggedStyle directly. + TaggedStyle = gopkg_yaml.TaggedStyle + + // DoubleQuotedStyle represents a double-quoted YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.DoubleQuotedStyle directly. + DoubleQuotedStyle = gopkg_yaml.DoubleQuotedStyle + + // SingleQuotedStyle represents a single-quoted YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.SingleQuotedStyle directly. + SingleQuotedStyle = gopkg_yaml.SingleQuotedStyle + + // LiteralStyle represents a literal YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.LiteralStyle directly. + LiteralStyle = gopkg_yaml.LiteralStyle + + // FoldedStyle represents a folded YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.FoldedStyle directly. + FoldedStyle = gopkg_yaml.FoldedStyle + + // FlowStyle represents a flow YAML style. + // Deprecated: Use go.yaml.in/yaml/v3.FlowStyle directly. + FlowStyle = gopkg_yaml.FlowStyle +) + +// Function aliases for public functions from go.yaml.in/yaml/v3 +var ( + // Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value. 
+ // Deprecated: Use go.yaml.in/yaml/v3.Unmarshal directly. + Unmarshal = gopkg_yaml.Unmarshal + + // Marshal serializes the value provided into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v3.Marshal directly. + Marshal = gopkg_yaml.Marshal + + // NewDecoder returns a new decoder that reads from r. + // Deprecated: Use go.yaml.in/yaml/v3.NewDecoder directly. + NewDecoder = gopkg_yaml.NewDecoder + + // NewEncoder returns a new encoder that writes to w. + // Deprecated: Use go.yaml.in/yaml/v3.NewEncoder directly. + NewEncoder = gopkg_yaml.NewEncoder +)
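Finally, a sketch of the migration the README above describes: only the import path changes, since every name in this package forwards to go.yaml.in/yaml/v3. The config struct here is illustrative:

```go
package example

import (
	yaml "go.yaml.in/yaml/v3" // previously: sigs.k8s.io/yaml/goyaml.v3
)

type config struct {
	Name     string   `yaml:"name"`
	Replicas int      `yaml:"replicas"`
	Tags     []string `yaml:"tags,omitempty"`
}

// roundTrip decodes and re-encodes a document; Marshal and Unmarshal are the
// same functions under either import path, as the old package merely aliases them.
func roundTrip(in []byte) ([]byte, error) {
	var c config
	if err := yaml.Unmarshal(in, &c); err != nil {
		return nil, err
	}
	return yaml.Marshal(c)
}
```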