diff --git a/go.mod b/go.mod index 6bbbe9e7230e..7be9933e6621 100644 --- a/go.mod +++ b/go.mod @@ -80,35 +80,35 @@ replace ( github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20201214145556-6f1013f42f98 github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/component-helpers => 
github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210205160848-1fb0b6aa7392 + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/component-helpers => 
github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210701171757-c3d8a1c6948d k8s.io/gengo => k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 k8s.io/klog => k8s.io/klog v1.0.0 - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.1-0.20210205160848-1fb0b6aa7392 - k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/mount-utils => 
github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210205160848-1fb0b6aa7392 - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210205160848-1fb0b6aa7392 + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.1-0.20210701171757-c3d8a1c6948d + k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/sample-apiserver => 
github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210701171757-c3d8a1c6948d + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210701171757-c3d8a1c6948d k8s.io/system-validators => k8s.io/system-validators v1.0.4 ) diff --git a/go.sum b/go.sum index 38acba7bde45..17f7341fc623 100644 --- a/go.sum +++ b/go.sum @@ -629,54 +629,54 @@ github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab h1:lB github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 h1:7NmjUkJtGHpMTE/n8ia6itbCdZ7eYuTCXKc/zsA7OSM= github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8= -github.com/openshift/kubernetes v1.20.1-0.20210205160848-1fb0b6aa7392 h1:vGmEp1aDw9U30IPsbbYh2daJnQ0jN/Bt60Qc9rNDslE= -github.com/openshift/kubernetes v1.20.1-0.20210205160848-1fb0b6aa7392/go.mod h1:WYL8/qQDpjRt1Rh9DNGRWe3V2ovnZnRJMffzC0gC1uc= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210205160848-1fb0b6aa7392 h1:7ewP7GkyG0UQd9LHl8W+T6JeZbvQdsG4pEH2+h8P76g= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:12gqmOZJjmP0ciWfqR5ANIxYgDdTchaRCQx+nTGBtm4= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210205160848-1fb0b6aa7392 h1:ojHXaZVsWGZjRbkFdv6e7RCrAR5SIBPMPm1mqYc6Fec= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210205160848-1fb0b6aa7392/go.mod 
h1:Tcd087AOtnbniuSkrQXpXxuN3y5UwtbXXTmLFiJca8s= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210205160848-1fb0b6aa7392 h1:eN2L1SO/drd4DyVbBXYyVywv9rCxJbtGKjP9ViE2nrg= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:3axNrcLyqkBWxqU7Xl8RrLVDvPZOq5XMUHZ7mEFAQTM= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210205160848-1fb0b6aa7392 h1:t2t23wcTuREKCMdiugV5O1HObIB3tDOl8EEZp7mm+JQ= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:ED6HfkyjucTiWY56MfTsOW48qtYewvR5IODc4JRMc6k= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210205160848-1fb0b6aa7392 h1:En13FwcBsNWLw/Or3JidMBVC5hGebhsqOdjiOKXwKIs= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:2PkJ8J+3tJgD3gqH85mPwUZ0Z4gjOtdgrauOUoCndO0= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210205160848-1fb0b6aa7392 h1:z92AtKTxm/qEdhdsVV91esGOaT/38uJk+K5mjSUNrNc= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:MfFvfPRc3qZnRjJYRnD8KZGtlN0XC+zUZu5wu5yfUz0= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210205160848-1fb0b6aa7392 h1:xYjomAK2ZaXr5l099T0PW/AcTCwhxg1Dp+3D7TEk9u4= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:UbIReMP6onJAgmEZQmjsi9PwQZnuy6mN6cA+NX63KtI= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210205160848-1fb0b6aa7392 h1:9QFaXSsT2dspgTxj+evjs15Z0sBWjWPF2VyNNKrjANs= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210205160848-1fb0b6aa7392/go.mod 
h1:DwF2HdGOWvyoCRRAYTArqotIiLrKDWnOFYNGSXYv5kc= -github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:IdL3q169r16+fn077pRij9q//URx7OmYa+Wthx/j8KA= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210205160848-1fb0b6aa7392 h1:ztGSUSrJ5if6RaIc4qgcn0baYSKAy10lJZCIeg1qLMI= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:+m5WoP0r8VlXaFWELe3Us7JCRVtcZPrgUJ82zGtICDw= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210205160848-1fb0b6aa7392 h1:GpNc8gJlQGpAerdegKlqmTWnLgHioPsSOcLYd0FBRsk= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:aYiAP0jm/mzzaAPEbSnjAzpIvB32BWs83YlyYN7vNtc= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210205160848-1fb0b6aa7392 h1:L6sVLGlUfVqXo/6uG7ZUCrJhchVCjwphspS/GFw+n2U= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:C4aXxyoP3zkEJSMu98FCHF7lNDSNcBxMe13/JNk3WX8= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210205160848-1fb0b6aa7392 h1:5nnKNJXVbtrDjL9C15dXmNDk92/uacTrc0TiQVJvv5k= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:4xhnGOFiv9lkvENieScdSfcQHzXH9DR6QZRrCH0amMo= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210205160848-1fb0b6aa7392 h1:tglDAOPEDCHDPUAqo0mM2f4/1vdinTnyqXOaqqV+NHo= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:yvSgp4ik96yUXtiMdgyHzrZRI0XNlq8Wfs2Q54+pICU= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator 
v0.0.0-20210205160848-1fb0b6aa7392 h1:YaJffsmJ/7DCfIIoiZHX2N3kWZqoAmEGRkyHfZnOCuU= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:1q1yp9zAz75t3BOwP11lXWu9bKOEVr2D+Uuq/0tgaTc= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:G43SCvMMH8h7lJxYjXgy4Ik5i/xLkA5MQm1PHsuKQxc= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210205160848-1fb0b6aa7392 h1:pUEv0MtwT/KN7qwydszhO3fea4gYNHm4haRGyZm8RXc= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:T53q06WyCndb2KNmm3lm6MT37ES+AIMi9msiFacQgiM= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210205160848-1fb0b6aa7392 h1:T8ihSeUB2P8KiTPZepcWMuHgN4JPLbKD4RtNasaQ7f4= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:1eeLynEfXxcPS3rGxwkUpSbjJ/6LhqhBK6E8tIaNNFs= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210205160848-1fb0b6aa7392 h1:IjGpcGyZgsnTPiFCaw+/nKQ0t/vEG6GwXr/28QMHyUk= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:OpQzy3zR3K/YFh6yaTqUJjNzDLsV4F0mEOITBpXpJv0= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210205160848-1fb0b6aa7392 h1:z1Sl6ev6T3E84DZLPB6VjOMg5w4eiusV9kC4JdiLxGY= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:ry4feXn7kqXZhCrz+qGy576zyIpYATh0QTINIibRzRk= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210205160848-1fb0b6aa7392 h1:68ZN5NNr0xbeTJhvzYaC1KMB1a5lAJfPqS/M66ZNJIU= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers 
v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:SZzO0CnWBCwbiYLkYwThH9vvD4d0Q1plgf0r2WY1S+w= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210205160848-1fb0b6aa7392 h1:EHcXdKRuKGr81ei8MoGLyWlskF3SN35z+yiT4H5ocyc= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:5UY8SSbdbpx1XBR5HUbS/QkW4poh3AJTQvW3443qQmg= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210205160848-1fb0b6aa7392 h1:CuNozkr3ee7pGfsCdUNQ9yv3osle6EZNwIU1AQPnLrA= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:mFeSjsWvLj55xUpwltalvolz49izW7J0N5RfoIHrKKY= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210205160848-1fb0b6aa7392 h1:Jrbs/MygMNIBu9CKJ5rmTaXNmBIdRzAHMlwuI+1BFXI= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210205160848-1fb0b6aa7392/go.mod h1:WtXfFgTtCy2EaR3FvGYUsA+wulDHkF664sjyQrFMPQw= +github.com/openshift/kubernetes v1.20.1-0.20210701171757-c3d8a1c6948d h1:7wFgtalHiNM5dCFsPb9z+BLtuaNH8SSl+OZWPKW3Wns= +github.com/openshift/kubernetes v1.20.1-0.20210701171757-c3d8a1c6948d/go.mod h1:WYL8/qQDpjRt1Rh9DNGRWe3V2ovnZnRJMffzC0gC1uc= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210701171757-c3d8a1c6948d h1:CMXNtJG//RCotjsb323ujyTzIRBjJDovnzqjNvD2lBo= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:12gqmOZJjmP0ciWfqR5ANIxYgDdTchaRCQx+nTGBtm4= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210701171757-c3d8a1c6948d h1:UlBvEHypSbtuiiyp/iYhXd9HqucNai90Xjv0Z8MhTAE= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210701171757-c3d8a1c6948d/go.mod 
h1:Tcd087AOtnbniuSkrQXpXxuN3y5UwtbXXTmLFiJca8s= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210701171757-c3d8a1c6948d h1:FlJrmMPuqysyEBeDTQTBqdTdH03pFIowFCAcG07vebA= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:3axNrcLyqkBWxqU7Xl8RrLVDvPZOq5XMUHZ7mEFAQTM= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210701171757-c3d8a1c6948d h1:MifxVrHw6HohUFvRv74YsSjKyjuXCVsme0A9FVCCR7A= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:ED6HfkyjucTiWY56MfTsOW48qtYewvR5IODc4JRMc6k= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210701171757-c3d8a1c6948d h1:9FddlqVdMEBo1rqdHubb7PCZDkpst5J6yqLIurctm7E= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:2PkJ8J+3tJgD3gqH85mPwUZ0Z4gjOtdgrauOUoCndO0= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210701171757-c3d8a1c6948d h1:tWH/utHzm1Wd52G2RjjahIDSO7zmltoBoeSysG/cmjw= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:MfFvfPRc3qZnRjJYRnD8KZGtlN0XC+zUZu5wu5yfUz0= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210701171757-c3d8a1c6948d h1:K2puKJnduz7Qpb5kwxyoFXpA/lLnaGAPlXyOT5uWyf4= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:UbIReMP6onJAgmEZQmjsi9PwQZnuy6mN6cA+NX63KtI= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210701171757-c3d8a1c6948d h1:rpmKKNBsl5E+3+vrryxuKP1j3w08CCVwSCKBtGVKYB0= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210701171757-c3d8a1c6948d/go.mod 
h1:DwF2HdGOWvyoCRRAYTArqotIiLrKDWnOFYNGSXYv5kc= +github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:IdL3q169r16+fn077pRij9q//URx7OmYa+Wthx/j8KA= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210701171757-c3d8a1c6948d h1:kOQuVerecYIj7pkzLKnBBqHMrJroJwDRUmJIyxlfHhY= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:+m5WoP0r8VlXaFWELe3Us7JCRVtcZPrgUJ82zGtICDw= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210701171757-c3d8a1c6948d h1:yzLHuCnaLqz/gdurUBB76Au164ZqzkDMyHtc6Akuab4= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:aYiAP0jm/mzzaAPEbSnjAzpIvB32BWs83YlyYN7vNtc= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210701171757-c3d8a1c6948d h1:aiLQp/d5havGqKXwiTBMYWKhFBE8JBwqEUWYMYG1eMI= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:C4aXxyoP3zkEJSMu98FCHF7lNDSNcBxMe13/JNk3WX8= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210701171757-c3d8a1c6948d h1:nTnA4IPMOmB3UKNhQ9rqS+6328/6Fw9YGG9KjREeuJM= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:4xhnGOFiv9lkvENieScdSfcQHzXH9DR6QZRrCH0amMo= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210701171757-c3d8a1c6948d h1:i9Bg6RajcUqP0LgBVLXyTE83FGHJDm3fGasQPyFMe3s= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:yvSgp4ik96yUXtiMdgyHzrZRI0XNlq8Wfs2Q54+pICU= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator 
v0.0.0-20210701171757-c3d8a1c6948d h1:R5FNuasjinj/a96wfS2qJFs5HCzy28jaOn9nrZl6tjU= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:1q1yp9zAz75t3BOwP11lXWu9bKOEVr2D+Uuq/0tgaTc= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:G43SCvMMH8h7lJxYjXgy4Ik5i/xLkA5MQm1PHsuKQxc= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210701171757-c3d8a1c6948d h1:IwJbP6qgR9nS1Z5qzA02tBmPUzc29N+XCcCppYL+7LE= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:T53q06WyCndb2KNmm3lm6MT37ES+AIMi9msiFacQgiM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210701171757-c3d8a1c6948d h1:ImhFMkRz+6MmkawBP8IJLlPmbMEla2W5NEpk/OuE9Pw= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:1eeLynEfXxcPS3rGxwkUpSbjJ/6LhqhBK6E8tIaNNFs= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210701171757-c3d8a1c6948d h1:UCi9NXktvLlrd6ZCQ9x2NEDSulF60RQr+UeYDjS6Q6w= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:OpQzy3zR3K/YFh6yaTqUJjNzDLsV4F0mEOITBpXpJv0= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210701171757-c3d8a1c6948d h1:8o+tp+CZC7ihLt1xbdUYcu0Zjlc2zPGbf/8HKaaBeSQ= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:ry4feXn7kqXZhCrz+qGy576zyIpYATh0QTINIibRzRk= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210701171757-c3d8a1c6948d h1:HO/S/Us7xBSraU1IR7aTlZUvBKO1ScOnDZuY+ZrK9II= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers 
v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:SZzO0CnWBCwbiYLkYwThH9vvD4d0Q1plgf0r2WY1S+w= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210701171757-c3d8a1c6948d h1:eiDLcgNxEIRfGPyCe+Bgw0HUiYNmVCTynDQdRsNbk7A= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:5UY8SSbdbpx1XBR5HUbS/QkW4poh3AJTQvW3443qQmg= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210701171757-c3d8a1c6948d h1:dSAsqcXN253GmYoCNnCzophrc6iZM1bLxqgtPCTLTxM= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:mFeSjsWvLj55xUpwltalvolz49izW7J0N5RfoIHrKKY= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210701171757-c3d8a1c6948d h1:rT0oB+oCGYQGWSLvZL3WVjP/XUGh9udTwQ/xNstbb7E= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210701171757-c3d8a1c6948d/go.mod h1:WtXfFgTtCy2EaR3FvGYUsA+wulDHkF664sjyQrFMPQw= github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b h1:zKZCcZgL3jf02/jf/ldUg8TvudOFcPGIdf8kFkWh5MI= github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible h1:6XSBotNi58b4MwVV4F9o/jd4BaQd+uJyz+s5TR0/ot8= @@ -1010,7 +1010,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201110211018-35f3e6cf4a65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= 
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index 87d3b72c1830..7f8f17b929ed 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -323,7 +323,7 @@ var annotations = map[string]string{ "[Top Level] [sig-api-machinery] API data in etcd should be stored at the correct location and version for all resources [Serial]": "should be stored at the correct location and version for all resources [Serial] [Suite:openshift/conformance/serial]", - "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can be classified by testing flow-schemas/priority-levels": "should ensure that requests can be classified by testing flow-schemas/priority-levels [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration": "should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": "should ensure that requests can't be drowned out (fairness) [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -503,6 +503,12 @@ var annotations = map[string]string{ "[Top Level] [sig-api-machinery] Secrets should patch a secret [Conformance]": "should patch a secret [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s": "default timeout should be used if the specified timeout in the request URL is 0s [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL": "should return HTTP status code 400 if the user specifies an invalid timeout in the request URL [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-api-machinery] Server request timeout the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed": "the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls": "should return chunks of results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]": "should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow] [Suite:k8s]", @@ -2535,7 +2541,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV 
(block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2569,7 +2575,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] 
subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2665,7 +2671,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2713,13 +2719,13 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly 
[Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Suite:k8s]", 
- "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (immediate-binding)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2899,9 +2905,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] 
snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2941,7 +2947,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV 
(block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2975,7 +2981,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3071,7 +3077,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3119,13 +3125,13 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] 
snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (immediate-binding)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3305,9 +3311,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI 
Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source 
data, check deletion [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI mock volume CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow]": "should bringup pod after deploying CSIDriver attach=false [Slow] [Suite:k8s]", @@ -3327,7 +3333,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI mock volume CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error": "should retry NodeStage after NodeStage final error [Suite:k8s]", - "[Top Level] [sig-storage] CSI mock volume CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists": "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists": "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on": "should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3641,7 +3647,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - 
"[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3675,7 +3681,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes 
[Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3771,7 +3777,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4011,7 +4017,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic 
PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4045,7 +4051,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV 
(default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4141,7 +4147,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4381,7 +4387,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] 
[Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", @@ -4417,7 +4423,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] 
[Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -4513,7 +4519,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -4755,7 +4761,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data 
source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4789,7 +4795,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4885,7 +4891,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5125,7 +5131,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5161,7 +5167,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5257,7 +5263,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5499,7 +5505,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5533,7 +5539,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5629,7 +5635,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5869,7 +5875,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5905,7 +5911,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should 
provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6001,7 +6007,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6243,7 +6249,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6279,7 +6285,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV 
(default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6375,7 +6381,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6617,7 +6623,7 @@ var annotations = 
map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6653,7 +6659,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: 
Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6749,7 +6755,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7361,7 +7367,7 @@ var annotations = map[string]string{ "[Top Level] 
[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7395,7 +7401,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7491,7 +7497,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory 
is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7731,7 +7737,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7767,7 +7773,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7863,7 +7869,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8105,7 +8111,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8141,7 +8147,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8237,7 +8243,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", 
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8479,7 +8485,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand 
should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8515,7 +8521,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8611,7 +8617,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8853,7 +8859,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] 
provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8889,7 +8895,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8985,7 
+8991,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9227,7 +9233,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should 
provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9263,7 +9269,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be 
able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9359,7 +9365,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9601,7 +9607,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top 
Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9637,7 +9643,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9733,7 +9739,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] 
[Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9975,7 +9981,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10011,7 +10017,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10107,7 +10113,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source 
[Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10349,7 +10355,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10385,7 +10391,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should 
provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", @@ -10481,7 +10487,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source 
[Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10723,7 +10729,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", @@ -10757,7 +10763,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision 
storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -10853,7 +10859,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] 
provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -11093,7 +11099,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11129,7 +11135,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11225,7 +11231,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with 
snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11467,7 +11473,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11503,7 +11509,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11599,7 +11605,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning 
should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", diff --git a/vendor/k8s.io/apiserver/pkg/audit/BUILD b/vendor/k8s.io/apiserver/pkg/audit/BUILD index 7f64aee1d923..0934acb2325e 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/BUILD +++ b/vendor/k8s.io/apiserver/pkg/audit/BUILD @@ -52,8 +52,12 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD index b03d79a2a555..e1cdfafd65f3 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -16,6 +16,7 @@ go_test( "cachecontrol_test.go", "impersonation_test.go", "metrics_test.go", + "request_deadline_test.go", "request_received_time_test.go", "requestinfo_test.go", "warning_test.go", @@ -24,6 +25,7 @@ go_test( deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", @@ -39,6 +41,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/github.com/google/uuid:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], @@ -56,6 +59,7 @@ go_library( "doc.go", "impersonation.go", "metrics.go", + "request_deadline.go", "request_received_time.go", "requestinfo.go", "storageversion.go", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go new file mode 100644 index 000000000000..1e43cdab20f1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilclock "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/klog/v2" +) + +const ( + // The 'timeout' query parameter in the request URL has an invalid duration specifier + invalidTimeoutInURL = "invalid timeout specified in the request URL" +) + +// WithRequestDeadline determines the timeout duration applicable to the given request and sets a new context +// with the appropriate deadline. +// auditWrapper provides an http.Handler that audits a failed request. +// longRunning returns true if the given request is a long running request. +// requestTimeoutMaximum specifies the default request timeout value.
+func WithRequestDeadline(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunning request.LongRunningRequestCheck, + negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration) http.Handler { + return withRequestDeadline(handler, sink, policy, longRunning, negotiatedSerializer, requestTimeoutMaximum, utilclock.RealClock{}) +} + +func withRequestDeadline(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunning request.LongRunningRequestCheck, + negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration, clock utilclock.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + handleError(w, req, http.StatusInternalServerError, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong")) + return + } + if longRunning(req, requestInfo) { + handler.ServeHTTP(w, req) + return + } + + userSpecifiedTimeout, ok, err := parseTimeout(req) + if err != nil { + statusErr := apierrors.NewBadRequest(fmt.Sprintf("%s", err.Error())) + + klog.Errorf("Error - %s: %#v", err.Error(), req.RequestURI) + + failed := failedErrorHandler(negotiatedSerializer, statusErr) + failWithAudit := withFailedRequestAudit(failed, statusErr, sink, policy) + failWithAudit.ServeHTTP(w, req) + return + } + + timeout := requestTimeoutMaximum + if ok { + // we use the default timeout enforced by the apiserver: + // - if the user has specified a timeout of 0s, this implies no timeout on the user's part. + // - if the user has specified a timeout that exceeds the maximum deadline allowed by the apiserver. 
+ if userSpecifiedTimeout > 0 && userSpecifiedTimeout < requestTimeoutMaximum { + timeout = userSpecifiedTimeout + } + } + + started := clock.Now() + if requestStartedTimestamp, ok := request.ReceivedTimestampFrom(ctx); ok { + started = requestStartedTimestamp + } + + ctx, cancel := context.WithDeadline(ctx, started.Add(timeout)) + defer cancel() + + req = req.WithContext(ctx) + handler.ServeHTTP(w, req) + }) +} + +// withFailedRequestAudit decorates a failed http.Handler and is used to audit a failed request. +// statusErr is used to populate the Message property of ResponseStatus. +func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.StatusError, sink audit.Sink, policy policy.Checker) http.Handler { + if sink == nil || policy == nil { + return failedHandler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) + responsewriters.InternalError(w, req, errors.New("failed to create audit event")) + return + } + if ev == nil { + failedHandler.ServeHTTP(w, req) + return + } + + ev.ResponseStatus = &metav1.Status{} + ev.Stage = auditinternal.StageResponseStarted + if statusErr != nil { + ev.ResponseStatus.Message = statusErr.Error() + } + + rw := decorateResponseWriter(w, ev, sink, omitStages) + failedHandler.ServeHTTP(rw, req) + }) +} + +// failedErrorHandler returns an http.Handler that uses the specified StatusError object +// to render an error response to the request. 
+func failedErrorHandler(s runtime.NegotiatedSerializer, statusError *apierrors.StatusError) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + + gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + responsewriters.ErrorNegotiated(statusError, s, gv, w, req) + }) +} + +// parseTimeout parses the given HTTP request URL and extracts the timeout query parameter +// value if specified by the user. +// If a timeout is not specified the function returns false and err is set to nil +// If the value specified is malformed then the function returns false and err is set +func parseTimeout(req *http.Request) (time.Duration, bool, error) { + value := req.URL.Query().Get("timeout") + if value == "" { + return 0, false, nil + } + + timeout, err := time.ParseDuration(value) + if err != nil { + return 0, false, fmt.Errorf("%s - %s", invalidTimeoutInURL, err.Error()) + } + + return timeout, true, nil +} + +func handleError(w http.ResponseWriter, r *http.Request, code int, err error) { + errorMsg := fmt.Sprintf("Error - %s: %#v", err.Error(), r.RequestURI) + http.Error(w, errorMsg, code) + klog.Errorf(errorMsg) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 631914f36aac..e1072d99c5a3 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -57,9 +57,6 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int return } - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - 
namespace, name, err := scope.Namer.Name(req) if err != nil { if includeName { @@ -76,7 +73,9 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int } } - ctx, cancel := context.WithTimeout(req.Context(), timeout) + // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided + // timeout inside the parent context is lower than requestTimeoutUpperBound. + ctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound) defer cancel() outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { @@ -157,7 +156,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int } // Dedup owner references before updating managed fields dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) - result, err := finishRequest(timeout, func() (runtime.Object, error) { + result, err := finishRequest(ctx, func() (runtime.Object, error) { if scope.FieldManager != nil { liveObj, err := scope.Creater.New(scope.Kind) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 892aaf4a0d22..0fd87f74750a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -54,16 +54,17 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc return } - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } - ctx, cancel := context.WithTimeout(req.Context(), timeout) + + // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided + // timeout inside the parent context is lower than 
requestTimeoutUpperBound. + ctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound) defer cancel() + ctx = request.WithNamespace(ctx, namespace) ae := request.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) @@ -123,7 +124,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc wasDeleted := true userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) - result, err := finishRequest(timeout, func() (runtime.Object, error) { + result, err := finishRequest(ctx, func() (runtime.Object, error) { obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) wasDeleted = deleted return obj, err @@ -172,17 +173,17 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc return } - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - namespace, err := scope.Namer.Namespace(req) if err != nil { scope.err(err, w, req) return } - ctx, cancel := context.WithTimeout(req.Context(), timeout) + // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided + // timeout inside the parent context is lower than requestTimeoutUpperBound. 
+ ctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound) defer cancel() + ctx = request.WithNamespace(ctx, namespace) ae := request.AuditEventFrom(ctx) @@ -265,7 +266,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc admit = admission.WithAudit(admit, ae) userInfo, _ := request.UserFrom(ctx) staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) - result, err := finishRequest(timeout, func() (runtime.Object, error) { + result, err := finishRequest(ctx, func() (runtime.Object, error) { return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) }) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 096330a4ae9c..3d720032804a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -84,19 +84,17 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac return } - // TODO: we either want to remove timeout or document it (if we - // document, move timeout out of this function and declare it in - // api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } - ctx, cancel := context.WithTimeout(req.Context(), timeout) + // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided + // timeout inside the parent context is lower than requestTimeoutUpperBound. 
+ ctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound) defer cancel() + ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) @@ -208,7 +206,6 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac codec: codec, - timeout: timeout, options: options, restPatcher: r, @@ -271,7 +268,6 @@ type patcher struct { codec runtime.Codec - timeout time.Duration options *metav1.PatchOptions // Operation information @@ -591,7 +587,7 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti wasCreated = created return updateObject, updateErr } - result, err := finishRequest(p.timeout, func() (runtime.Object, error) { + result, err := finishRequest(ctx, func() (runtime.Object, error) { result, err := requestFunc() // If the object wasn't committed to storage because it's serialized size was too large, // it is safe to remove managedFields (which can be large) and try again. diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 01396f2716f2..ce8a2a32dc98 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -53,6 +53,9 @@ import ( ) const ( + // 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout. + // Everyone chooses 30. + requestTimeoutUpperBound = 34 * time.Second // DuplicateOwnerReferencesWarningFormat is the warning that a client receives when a create/update request contains // duplicate owner reference entries. 
DuplicateOwnerReferencesWarningFormat = ".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" @@ -227,7 +230,7 @@ type resultFunc func() (runtime.Object, error) // finishRequest makes a given resultFunc asynchronous and handles errors returned by the response. // An api.Status object with status != success is considered an "error", which interrupts the normal response flow. -func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) { +func finishRequest(ctx context.Context, fn resultFunc) (result runtime.Object, err error) { // these channels need to be buffered to prevent the goroutine below from hanging indefinitely // when the select statement reads something other than the one the goroutine sends on. ch := make(chan runtime.Object, 1) @@ -271,8 +274,8 @@ func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, return nil, err case p := <-panicCh: panic(p) - case <-time.After(timeout): - return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout %s", timeout), 0) + case <-ctx.Done(): + return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout %s", ctx.Err()), 0) } } @@ -487,18 +490,6 @@ func limitedReadBody(req *http.Request, limit int64) ([]byte, error) { return data, nil } -func parseTimeout(str string) time.Duration { - if str != "" { - timeout, err := time.ParseDuration(str) - if err == nil { - return timeout - } - klog.Errorf("Failed to parse %q: %v", str, err) - } - // 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout. Everyone chooses 30. 
- return 34 * time.Second -} - func isDryRun(url *url.URL) bool { return len(url.Query()["dryRun"]) != 0 } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index f0473323f995..cbee7167b193 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -54,16 +54,17 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa return } - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - namespace, name, err := scope.Namer.Name(req) if err != nil { scope.err(err, w, req) return } - ctx, cancel := context.WithTimeout(req.Context(), timeout) + + // enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided + // timeout inside the parent context is lower than requestTimeoutUpperBound. + ctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound) defer cancel() + ctx = request.WithNamespace(ctx, namespace) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) @@ -195,7 +196,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa } // Dedup owner references before updating managed fields dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) - result, err := finishRequest(timeout, func() (runtime.Object, error) { + result, err := finishRequest(ctx, func() (runtime.Object, error) { result, err := requestFunc() // If the object wasn't committed to storage because it's serialized size was too large, // it is safe to remove managedFields (which can be large) and try again. 
diff --git a/vendor/k8s.io/apiserver/pkg/server/BUILD b/vendor/k8s.io/apiserver/pkg/server/BUILD index 39e7481ed7f6..7904109dbb83 100644 --- a/vendor/k8s.io/apiserver/pkg/server/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/BUILD @@ -12,6 +12,7 @@ go_test( "config_selfclient_test.go", "config_test.go", "genericapiserver_test.go", + "graceful_shutdown_test.go", "healthz_test.go", ], embed = [":go_default_library"], @@ -47,6 +48,7 @@ go_test( "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", "//vendor/k8s.io/utils/net:go_default_library", ], @@ -63,6 +65,8 @@ go_library( "handler.go", "healthz.go", "hooks.go", + "patch.go", + "patch_genericapiserver.go", "plugins.go", "secure_serving.go", "signal.go", @@ -72,6 +76,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server", importpath = "k8s.io/apiserver/pkg/server", deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -122,6 +127,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/flowcontrol:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/openapi:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//staging/src/k8s.io/component-base/logs:go_default_library", 
@@ -130,6 +137,7 @@ go_library( "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/google/uuid:go_default_library", + "//vendor/go.uber.org/atomic:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index ea7fb4bf9b21..fdba11c1fa48 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -830,7 +830,13 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = filterlatency.TrackStarted(handler, "authentication") handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") - handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout) + + // WithTimeoutForNonLongRunningRequests will call the rest of the request handling in a go-routine with the + // context with deadline. The go-routine can keep running, while the timeout logic will return a timeout to the client. 
+ handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc) + + handler = genericapifilters.WithRequestDeadline(handler, c.AuditBackend, c.AuditPolicyChecker, + c.LongRunningFunc, c.Serializer, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) handler = WithNonReadyRequestLogging(handler, c.hasBeenReadyCh) handler = WithLateConnectionFilter(handler) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index 2405bfd1ff92..ccbed60dba5d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -18,14 +18,12 @@ package filters import ( "bufio" - "context" "encoding/json" "fmt" "net" "net/http" "runtime" "sync" - "time" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -34,37 +32,33 @@ import ( ) // WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by timeout. 
-func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, timeout time.Duration) http.Handler { +func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apirequest.LongRunningRequestCheck) http.Handler { if longRunning == nil { return handler } - timeoutFunc := func(req *http.Request) (*http.Request, <-chan time.Time, func(), *apierrors.StatusError) { + timeoutFunc := func(req *http.Request) (*http.Request, bool, func(), *apierrors.StatusError) { // TODO unify this with apiserver.MaxInFlightLimit ctx := req.Context() requestInfo, ok := apirequest.RequestInfoFrom(ctx) if !ok { // if this happens, the handler chain isn't setup correctly because there is no request info - return req, time.After(timeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) + return req, false, func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) } if longRunning(req, requestInfo) { - return req, nil, nil, nil + return req, true, nil, nil } - ctx, cancel := context.WithCancel(ctx) - req = req.WithContext(ctx) - postTimeoutFn := func() { - cancel() metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout) } - return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) + return req, false, postTimeoutFn, apierrors.NewTimeoutError("request did not complete within the allotted timeout", 0) } return WithTimeout(handler, timeoutFunc) } -type timeoutFunc = func(*http.Request) (req *http.Request, timeout <-chan time.Time, postTimeoutFunc func(), err *apierrors.StatusError) +type timeoutFunc = func(*http.Request) (req *http.Request, longRunning bool, postTimeoutFunc func(), err *apierrors.StatusError) // WithTimeout returns an http.Handler that runs h with a timeout // determined by timeoutFunc. 
The new http.Handler calls h.ServeHTTP to handle @@ -85,12 +79,14 @@ type timeoutHandler struct { } func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r, after, postTimeoutFn, err := t.timeout(r) - if after == nil { + r, longRunning, postTimeoutFn, err := t.timeout(r) + if longRunning { t.handler.ServeHTTP(w, r) return } + timeoutCh := r.Context().Done() + // resultCh is used as both errCh and stopCh resultCh := make(chan interface{}) tw := newTimeoutWriter(w) @@ -117,7 +113,7 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { panic(err) } return - case <-after: + case <-timeoutCh: defer func() { // resultCh needs to have a reader, since the function doing // the work needs to send to it. This is defer'd to ensure it runs diff --git a/vendor/k8s.io/apiserver/pkg/server/signal.go b/vendor/k8s.io/apiserver/pkg/server/signal.go index e5334ae4c15f..bdd2728f8abf 100644 --- a/vendor/k8s.io/apiserver/pkg/server/signal.go +++ b/vendor/k8s.io/apiserver/pkg/server/signal.go @@ -20,6 +20,8 @@ import ( "context", "os" "os/signal" + + "k8s.io/klog/v2" ) var onlyOneSignalHandler = make(chan struct{}) @@ -34,10 +36,26 @@ func SetupSignalHandler() <-chan struct{} { return SetupSignalContext().Done() } +// SetupSignalHandlerIgnoringFurtherSignals is the same as SetupSignalHandler, except +// it ignores further exit signals after receiving the first one. +func SetupSignalHandlerIgnoringFurtherSignals() <-chan struct{} { + return SetupSignalContextNotExiting().Done() +} + // SetupSignalContext is same as SetupSignalHandler, but a context.Context is returned. // Only one of SetupSignalContext and SetupSignalHandler should be called, and only can // be called once. func SetupSignalContext() context.Context { + return setupSignalContext(true) +} + +// SetupSignalContextNotExiting is the same as SetupSignalContext, except +// it ignores further exit signals after receiving the first one.
+func SetupSignalContextNotExiting() context.Context { + return setupSignalContext(false) +} + +func setupSignalContext(exitOnSecondSignal bool) context.Context { close(onlyOneSignalHandler) // panics when called twice shutdownHandler = make(chan os.Signal, 2) @@ -47,8 +65,15 @@ func SetupSignalContext() context.Context { go func() { <-shutdownHandler cancel() - <-shutdownHandler - os.Exit(1) // second signal. Exit directly. + if exitOnSecondSignal { + <-shutdownHandler + os.Exit(1) + } else { + for { + <-shutdownHandler + klog.Infof("Termination signal has been received already. Ignoring signal.") + } + } }() return ctx diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index edca51a5714e..36856aa32a35 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -178,7 +178,7 @@ cluster's shared state through which all other components interact.`, return utilerrors.NewAggregate(errs) } - return Run(completedOptions, genericapiserver.SetupSignalHandler()) + return Run(completedOptions, genericapiserver.SetupSignalHandlerIgnoringFurtherSignals()) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go index 9704f2c30284..a08d6400686f 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go @@ -88,9 +88,6 @@ var ( "MetricsGrabber should grab all metrics from a ControllerManager", "MetricsGrabber should grab all metrics from a Scheduler", - // https://bugzilla.redhat.com/show_bug.cgi?id=1906518 - `\[Feature:VolumeSnapshotDataSource\]`, - // https://bugzilla.redhat.com/show_bug.cgi?id=1906808 `ServiceAccounts should support OIDC discovery of service account issuer`, 
}, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index fd732430d1ae..8db1638d1793 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -983,7 +983,6 @@ func ensureSystemCgroups(rootCgroupPath string, manager cgroups.Manager) error { pids = append(pids, pid) } - klog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) // Check if we have moved all the non-kernel PIDs. if len(pids) == 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go index 2f29ddd576ff..ea71490ff4d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go @@ -17,21 +17,42 @@ limitations under the License. package config import ( - "k8s.io/api/core/v1" + "time" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) +// WaitForAPIServerSyncPeriod is the period between checks for the node list/watch initial sync +const WaitForAPIServerSyncPeriod = 1 * time.Second + // NewSourceApiserver creates a config source that watches and pulls from the apiserver. 
-func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, updates chan<- interface{}) { +func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, nodeHasSynced func() bool, updates chan<- interface{}) { lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) - newSourceApiserverFromLW(lw, updates) + + // The Reflector responsible for watching pods at the apiserver should be run only after + // the node sync with the apiserver has completed. + klog.Info("Waiting for node sync before watching apiserver pods") + go func() { + for { + if nodeHasSynced() { + klog.V(4).Info("node sync completed") + break + } + time.Sleep(WaitForAPIServerSyncPeriod) + klog.V(4).Info("node sync has not completed yet") + } + klog.Info("Watching apiserver") + newSourceApiserverFromLW(lw, updates) + }() } // newSourceApiserverFromLW holds creates a config source that watches and pulls from the apiserver. 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go index 2f525357df5b..5466dd8b3212 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go @@ -135,7 +135,7 @@ func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.G // - whenever a pod is created or updated, we start individual watches for all // referenced objects that aren't referenced from other registered pods // - every GetObject() returns a value from local cache propagated via watches -func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager { +func NewWatchingConfigMapManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager { listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts) } @@ -153,6 +153,6 @@ func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager { } gr := corev1.Resource("configmap") return &configMapManager{ - manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, isImmutable, gr, getConfigMapNames), + manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, isImmutable, gr, resyncInterval, getConfigMapNames), } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 5c393917f77e..944c8c8cd820 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -442,8 +442,9 @@ func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Sign observations, _ := makeSignalObservations(summary) debugLogObservations("observations after resource reclaim", observations) 
- // determine the set of thresholds met independent of grace period - thresholds := thresholdsMet(m.config.Thresholds, observations, false) + // evaluate all thresholds independently of their grace period to see if with + // the new observations, we think we have met min reclaim goals + thresholds := thresholdsMet(m.config.Thresholds, observations, true) debugLogThresholdsWithObservation("thresholds after resource reclaim - ignoring grace period", thresholds, observations) if len(thresholds) == 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 1367f248250a..78e7d79c3d1a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -124,9 +124,6 @@ const ( // Max amount of time to wait for the container runtime to come up. maxWaitForContainerRuntime = 30 * time.Second - // Max amount of time to wait for node list/watch to initially sync - maxWaitForAPIServerSync = 10 * time.Second - // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed. nodeStatusUpdateRetry = 5 @@ -247,7 +244,7 @@ type DockerOptions struct { // makePodSourceConfig creates a config.PodConfig from the given // KubeletConfiguration or returns an error. 
-func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) { +func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) { manifestURLHeader := make(http.Header) if len(kubeCfg.StaticPodURLHeader) > 0 { for k, v := range kubeCfg.StaticPodURLHeader { @@ -273,8 +270,8 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku } if kubeDeps.KubeClient != nil { - klog.Infof("Watching apiserver") - config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, cfg.Channel(kubetypes.ApiserverSource)) + klog.Info("Adding apiserver pod source") + config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, nodeHasSynced, cfg.Channel(kubetypes.ApiserverSource)) } return cfg, nil } @@ -380,9 +377,32 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } } + var nodeHasSynced cache.InformerSynced + var nodeLister corelisters.NodeLister + + // If kubeClient == nil, we are running in standalone mode (i.e. no API servers) + // If not nil, we are running as part of a cluster and should sync w/API + if kubeDeps.KubeClient != nil { + kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fields.Set{api.ObjectNameField: string(nodeName)}.String() + })) + nodeLister = kubeInformers.Core().V1().Nodes().Lister() + nodeHasSynced = func() bool { + return kubeInformers.Core().V1().Nodes().Informer().HasSynced() + } + kubeInformers.Start(wait.NeverStop) + klog.Info("Attempting to sync node with API server") + } else { + // we don't have a client to sync! 
+ nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + nodeLister = corelisters.NewNodeLister(nodeIndexer) + nodeHasSynced = func() bool { return true } + klog.Info("Kubelet is running in standalone mode, will skip API server sync") + } + if kubeDeps.PodConfig == nil { var err error - kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName) + kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, nodeHasSynced) if err != nil { return nil, err } @@ -434,31 +454,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, serviceHasSynced = func() bool { return true } } - var nodeHasSynced cache.InformerSynced - var nodeLister corelisters.NodeLister - - if kubeDeps.KubeClient != nil { - kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.FieldSelector = fields.Set{api.ObjectNameField: string(nodeName)}.String() - })) - nodeLister = kubeInformers.Core().V1().Nodes().Lister() - nodeHasSynced = func() bool { - if kubeInformers.Core().V1().Nodes().Informer().HasSynced() { - return true - } - klog.Infof("kubelet nodes not sync") - return false - } - kubeInformers.Start(wait.NeverStop) - klog.Info("Kubelet client is not nil") - } else { - // we dont have a client to sync! 
- nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) - nodeLister = corelisters.NewNodeLister(nodeIndexer) - nodeHasSynced = func() bool { return true } - klog.Info("Kubelet client is nil") - } - // construct a node reference used for events nodeRef := &v1.ObjectReference{ Kind: "Node", @@ -546,8 +541,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, var configMapManager configmap.Manager switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy { case kubeletconfiginternal.WatchChangeDetectionStrategy: - secretManager = secret.NewWatchingSecretManager(kubeDeps.KubeClient) - configMapManager = configmap.NewWatchingConfigMapManager(kubeDeps.KubeClient) + secretManager = secret.NewWatchingSecretManager(kubeDeps.KubeClient, klet.resyncInterval) + configMapManager = configmap.NewWatchingConfigMapManager(kubeDeps.KubeClient, klet.resyncInterval) case kubeletconfiginternal.TTLCacheChangeDetectionStrategy: secretManager = secret.NewCachingSecretManager( kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode)) @@ -2018,8 +2013,9 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir } // optimization: avoid invoking the pod worker if no further changes are possible to the pod definition - if podWorkerTerminal { - klog.V(4).Infof("Pod %q has completed, ignoring remaining sync work: %s", format.Pod(pod), syncType) + // (i.e. 
the pod has completed and its containers have been terminated) + if podWorkerTerminal && containersTerminal { + klog.V(4).InfoS("Pod has completed and its containers have been terminated, ignoring remaining sync work", "pod", klog.KObj(pod), "syncType", syncType) return } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index e8e2146d7444..f786a6921b21 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -22,7 +22,6 @@ import ( "io/ioutil" "net" "path/filepath" - "time" cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorv2 "github.com/google/cadvisor/info/v2" @@ -33,7 +32,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -237,15 +235,6 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) { if kl.kubeClient == nil { return kl.initialNode(context.TODO()) } - // if we have a valid kube client, we wait for initial lister to sync - if !kl.nodeHasSynced() { - err := wait.PollImmediate(time.Second, maxWaitForAPIServerSync, func() (bool, error) { - return kl.nodeHasSynced(), nil - }) - if err != nil { - return nil, fmt.Errorf("nodes have not yet been read at least once, cannot construct node object") - } - } return kl.nodeLister.Get(string(kl.nodeName)) } @@ -256,7 +245,7 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) { // zero capacity, and the default labels. 
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) { if kl.kubeClient != nil { - if n, err := kl.GetNode(); err == nil { + if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil { return n, nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index fbd70f3cbd88..f465952e6d03 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -1137,7 +1137,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // PodKiller handles requests for killing pods type PodKiller interface { // KillPod receives pod speficier representing the pod to kill - KillPod(pair *kubecontainer.PodPair) + KillPod(podPair *kubecontainer.PodPair) // PerformPodKillingWork performs the actual pod killing work via calling CRI // It returns after its Close() func is called and all outstanding pod killing requests are served PerformPodKillingWork() @@ -1215,10 +1215,9 @@ func (pk *podKillerWithChannel) markPodTerminated(uid string) { delete(pk.podTerminationMap, uid) } -// checkAndMarkPodPendingTerminationByPod checks to see if the pod is being -// killed and returns true if it is, otherwise the pod is added to the map and -// returns false -func (pk *podKillerWithChannel) checkAndMarkPodPendingTerminationByPod(podPair *kubecontainer.PodPair) bool { +// KillPod sends pod killing request to the killer after marks the pod +// unless the given pod has been marked to be killed +func (pk *podKillerWithChannel) KillPod(podPair *kubecontainer.PodPair) { pk.podKillingLock.Lock() defer pk.podKillingLock.Unlock() var apiPodExists bool @@ -1249,9 +1248,10 @@ func (pk *podKillerWithChannel) checkAndMarkPodPendingTerminationByPod(podPair * } else { klog.V(4).Infof("running pod %q is pending termination", podPair.RunningPod.ID) } - return true + return } - return false + // Limit to one request per pod + pk.podKillingCh <- podPair } // Close closes the channel 
through which requests are delivered @@ -1259,20 +1259,10 @@ func (pk *podKillerWithChannel) Close() { close(pk.podKillingCh) } -// KillPod sends pod killing request to the killer -func (pk *podKillerWithChannel) KillPod(pair *kubecontainer.PodPair) { - pk.podKillingCh <- pair -} - // PerformPodKillingWork launches a goroutine to kill a pod received from the channel if // another goroutine isn't already in action. func (pk *podKillerWithChannel) PerformPodKillingWork() { for podPair := range pk.podKillingCh { - if pk.checkAndMarkPodPendingTerminationByPod(podPair) { - // Pod is already being killed - continue - } - runningPod := podPair.RunningPod apiPod := podPair.APIPod diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index a7190df20876..bf6cec514eae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -101,7 +101,9 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS return nil, err } - kubeRuntimeManager.containerGC = newContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) + podStateProvider := newFakePodStateProvider() + kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) + kubeRuntimeManager.podStateProvider = podStateProvider kubeRuntimeManager.runtimeName = typedVersion.RuntimeName kubeRuntimeManager.imagePuller = images.NewImageManager( kubecontainer.FilterEventRecorder(recorder), diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 7bc8e7799e6a..d0ac368f09c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -137,6 +137,9 @@ type kubeGenericRuntimeManager struct { // Cache last per-container error message to reduce log spam logReduction *logreduction.LogReduction + + // PodState provider instance + podStateProvider podStateProvider } // KubeGenericRuntime is a interface contains interfaces for container runtime and command. @@ -248,6 +251,7 @@ func NewKubeGenericRuntimeManager( imagePullBurst) kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager) kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) + kubeRuntimeManager.podStateProvider = podStateProvider kubeRuntimeManager.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -751,6 +755,14 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine result.AddSyncResult(createSandboxResult) podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt) if err != nil { + // createPodSandbox can return an error from CNI, CSI, + // or CRI if the Pod has been deleted while the POD is + // being created. If the pod has been deleted then it's + // not a real error. 
+ if m.podStateProvider.IsPodDeleted(pod.UID) { + klog.V(4).Infof("Pod %q was deleted and sandbox failed to be created: %v", format.Pod(pod), pod.UID) + return + } createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg) klog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) ref, referr := ref.GetReference(legacyscheme.Scheme, pod) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go b/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go index 71275f1510bc..2d383e0a39ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go @@ -26,7 +26,7 @@ import ( cadvisorapiv1 "github.com/google/cadvisor/info/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" @@ -83,13 +83,13 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs if err := validateNodeIPFunc(nodeIP); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) } - klog.V(2).Infof("Using node IP: %q", nodeIP.String()) + klog.V(4).Infof("Using node IP: %q", nodeIP.String()) } if secondaryNodeIPSpecified { if err := validateNodeIPFunc(secondaryNodeIP); err != nil { return fmt.Errorf("failed to validate secondaryNodeIP: %v", err) } - klog.V(2).Infof("Using secondary node IP: %q", secondaryNodeIP.String()) + klog.V(4).Infof("Using secondary node IP: %q", secondaryNodeIP.String()) } if externalCloudProvider { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go index fc4289213480..93d0934c2800 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -38,8 +38,8 @@ const ( // and 1000. Containers with higher OOM scores are killed if the system runs out of memory. 
// See https://lwn.net/Articles/391222/ for more information. func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int { - if types.IsCriticalPod(pod) { - // Critical pods should be the last to get killed. + if types.IsNodeCriticalPod(pod) { + // Only node critical pod should be the last to get killed. return guaranteedOOMScoreAdj } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go index 3aa132a5bfaf..af17711eeee4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go @@ -136,7 +136,7 @@ func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetO // - whenever a pod is created or updated, we start individual watches for all // referenced objects that aren't referenced from other registered pods // - every GetObject() returns a value from local cache propagated via watches -func NewWatchingSecretManager(kubeClient clientset.Interface) Manager { +func NewWatchingSecretManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager { listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { return kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts) } @@ -154,6 +154,6 @@ func NewWatchingSecretManager(kubeClient clientset.Interface) Manager { } gr := corev1.Resource("secret") return &secretManager{ - manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, isImmutable, gr, getSecretNames), + manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, isImmutable, gr, resyncInterval, getSecretNames), } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 8b64ea1de3cb..953e888ef17b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -179,3 +179,8 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool { func IsCriticalPodBasedOnPriority(priority int32) bool { return priority >= scheduling.SystemCriticalPriority } + +// IsNodeCriticalPod checks if the given pod is a system-node-critical +func IsNodeCriticalPod(pod *v1.Pod) bool { + return IsCriticalPod(pod) && (pod.Spec.PriorityClassName == scheduling.SystemNodeCritical) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go index 4dfe10375393..cee515cae328 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -46,25 +47,108 @@ type isImmutableFunc func(runtime.Object) bool // objectCacheItem is a single item stored in objectCache. type objectCacheItem struct { refCount int - store cache.Store + store *cacheStore + reflector *cache.Reflector + hasSynced func() (bool, error) - // lock is protecting from closing stopCh multiple times. - lock sync.Mutex - stopCh chan struct{} + // waitGroup is used to ensure that there won't be two concurrent calls to reflector.Run + waitGroup sync.WaitGroup + + // lock is to ensure the access and modify of lastAccessTime, stopped, and immutable are thread safety, + // and protecting from closing stopCh multiple times. 
+ lock sync.Mutex + lastAccessTime time.Time + stopped bool + immutable bool + stopCh chan struct{} } func (i *objectCacheItem) stop() bool { i.lock.Lock() defer i.lock.Unlock() - select { - case <-i.stopCh: - // This means that channel is already closed. + return i.stopThreadUnsafe() +} + +func (i *objectCacheItem) stopThreadUnsafe() bool { + if i.stopped { return false - default: - close(i.stopCh) - return true } + i.stopped = true + close(i.stopCh) + if !i.immutable { + i.store.unsetInitialized() + } + return true +} + +func (i *objectCacheItem) setLastAccessTime(time time.Time) { + i.lock.Lock() + defer i.lock.Unlock() + i.lastAccessTime = time +} + +func (i *objectCacheItem) setImmutable() { + i.lock.Lock() + defer i.lock.Unlock() + i.immutable = true +} + +func (i *objectCacheItem) stopIfIdle(now time.Time, maxIdleTime time.Duration) bool { + i.lock.Lock() + defer i.lock.Unlock() + if !i.stopped && now.After(i.lastAccessTime.Add(maxIdleTime)) { + return i.stopThreadUnsafe() + } + return false +} + +func (i *objectCacheItem) restartReflectorIfNeeded() { + i.lock.Lock() + defer i.lock.Unlock() + if i.immutable || !i.stopped { + return + } + i.stopCh = make(chan struct{}) + i.stopped = false + go i.startReflector() +} + +func (i *objectCacheItem) startReflector() { + i.waitGroup.Wait() + i.waitGroup.Add(1) + defer i.waitGroup.Done() + i.reflector.Run(i.stopCh) +} + +// cacheStore is in order to rewrite Replace function to mark initialized flag +type cacheStore struct { + cache.Store + lock sync.Mutex + initialized bool +} + +func (c *cacheStore) Replace(list []interface{}, resourceVersion string) error { + c.lock.Lock() + defer c.lock.Unlock() + err := c.Store.Replace(list, resourceVersion) + if err != nil { + return err + } + c.initialized = true + return nil +} + +func (c *cacheStore) hasSynced() bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.initialized +} + +func (c *cacheStore) unsetInitialized() { + c.lock.Lock() + defer c.lock.Unlock() + 
c.initialized = false } // objectCache is a local cache of objects propagated via @@ -75,35 +159,53 @@ type objectCache struct { newObject newObjectFunc isImmutable isImmutableFunc groupResource schema.GroupResource + clock clock.Clock + maxIdleTime time.Duration lock sync.RWMutex items map[objectKey]*objectCacheItem } +const minIdleTime = 1 * time.Minute + // NewObjectCache returns a new watch-based instance of Store interface. func NewObjectCache( listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, isImmutable isImmutableFunc, - groupResource schema.GroupResource) Store { - return &objectCache{ + groupResource schema.GroupResource, + clock clock.Clock, + maxIdleTime time.Duration) Store { + + if maxIdleTime < minIdleTime { + maxIdleTime = minIdleTime + } + + store := &objectCache{ listObject: listObject, watchObject: watchObject, newObject: newObject, isImmutable: isImmutable, groupResource: groupResource, + clock: clock, + maxIdleTime: maxIdleTime, items: make(map[objectKey]*objectCacheItem), } + + // TODO propagate stopCh from the higher level. + go wait.Until(store.startRecycleIdleWatch, time.Minute, wait.NeverStop) + return store } -func (c *objectCache) newStore() cache.Store { +func (c *objectCache) newStore() *cacheStore { // TODO: We may consider created a dedicated store keeping just a single // item, instead of using a generic store implementation for this purpose. // However, simple benchmarks show that memory overhead in that case is // decrease from ~600B to ~300B per object. So we are not optimizing it // until we will see a good reason for that. 
- return cache.NewStore(cache.MetaNamespaceKeyFunc) + store := cache.NewStore(cache.MetaNamespaceKeyFunc) + return &cacheStore{store, sync.Mutex{}, false} } func (c *objectCache) newReflector(namespace, name string) *objectCacheItem { @@ -124,14 +226,15 @@ func (c *objectCache) newReflector(namespace, name string) *objectCacheItem { store, 0, ) - stopCh := make(chan struct{}) - go reflector.Run(stopCh) - return &objectCacheItem{ + item := &objectCacheItem{ refCount: 0, store: store, - hasSynced: func() (bool, error) { return reflector.LastSyncResourceVersion() != "", nil }, - stopCh: stopCh, + reflector: reflector, + hasSynced: func() (bool, error) { return store.hasSynced(), nil }, + stopCh: make(chan struct{}), } + go item.startReflector() + return item } func (c *objectCache) AddReference(namespace, name string) { @@ -186,10 +289,11 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { if !exists { return nil, fmt.Errorf("object %q/%q not registered", namespace, name) } + item.restartReflectorIfNeeded() if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil { return nil, fmt.Errorf("failed to sync %s cache: %v", c.groupResource.String(), err) } - + item.setLastAccessTime(c.clock.Now()) obj, exists, err := item.store.GetByKey(c.key(namespace, name)) if err != nil { return nil, err @@ -209,6 +313,7 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { // - doing that would require significant refactoring to reflector // we limit ourselves to just quickly stop the reflector here. 
if utilfeature.DefaultFeatureGate.Enabled(features.ImmutableEphemeralVolumes) && c.isImmutable(object) { + item.setImmutable() if item.stop() { klog.V(4).Infof("Stopped watching for changes of %q/%q - object is immutable", namespace, name) } @@ -218,6 +323,17 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { return nil, fmt.Errorf("unexpected object type: %v", obj) } +func (c *objectCache) startRecycleIdleWatch() { + c.lock.Lock() + defer c.lock.Unlock() + + for key, item := range c.items { + if item.stopIfIdle(c.clock.Now(), c.maxIdleTime) { + klog.V(4).InfoS("Not acquired for long time, Stopped watching for changes", "objectKey", key, "maxIdleTime", c.maxIdleTime) + } + } +} + // NewWatchBasedManager creates a manager that keeps a cache of all objects // necessary for registered pods. // It implements the following logic: @@ -230,7 +346,15 @@ func NewWatchBasedManager( newObject newObjectFunc, isImmutable isImmutableFunc, groupResource schema.GroupResource, + resyncInterval time.Duration, getReferencedObjects func(*v1.Pod) sets.String) Manager { - objectStore := NewObjectCache(listObject, watchObject, newObject, isImmutable, groupResource) + + // If a configmap/secret is used as a volume, the volumeManager will visit the objectCacheItem every resyncInterval cycle, + // We just want to stop the objectCacheItem referenced by environment variables, + // So, maxIdleTime is set to an integer multiple of resyncInterval, + // We currently set it to 5 times. 
+ maxIdleTime := resyncInterval * 5 + + objectStore := NewObjectCache(listObject, watchObject, newObject, isImmutable, groupResource, clock.RealClock{}, maxIdleTime) return NewCacheBasedManager(objectStore, getReferencedObjects) } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index 47b449e37148..6ad75b924ecf 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -318,7 +318,7 @@ func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, func (ep *endpointsInfo) Cleanup() { Log(ep, "Endpoint Cleanup", 3) - if ep.refCount != nil { + if !ep.GetIsLocal() && ep.refCount != nil { *ep.refCount-- // Remove the remote hns endpoint, if no service is referring it @@ -374,7 +374,9 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service } for _, ingress := range service.Status.LoadBalancer.Ingress { - info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP}) + if net.ParseIP(ingress.IP) != nil { + info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP}) + } } return info } @@ -1118,10 +1120,10 @@ func (proxier *Proxier) syncProxyRules() { } else { // We only share the refCounts for remote endpoints ep.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID) + *ep.refCount++ } ep.hnsID = newHnsEndpoint.hnsID - *ep.refCount++ Log(ep, "Endpoint resource found", 3) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index c6fb49192e86..e6fe7ff9d5bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -373,6 +373,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) { // writePayloadToDir 
writes the given payload to the given directory. The // directory must exist. func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error { + isNotWindows := runtime.GOOS != "windows" for userVisiblePath, fileProjection := range payload { content := fileProjection.Data mode := os.FileMode(fileProjection.Mode) @@ -400,9 +401,11 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir if fileProjection.FsUser == nil { continue } - if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil { - klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err) - return err + if isNotWindows { + if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil { + klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err) + return err + } } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label/admission.go index 36235e21792f..940fdd92cd6e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label/admission.go @@ -24,9 +24,12 @@ import ( "io" "sync" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/informers" cloudprovider "k8s.io/cloud-provider" cloudvolume "k8s.io/cloud-provider/volume" volumehelpers "k8s.io/cloud-provider/volume/helpers" @@ -51,12 +54,14 @@ func Register(plugins *admission.Plugins) { } var _ = admission.Interface(&persistentVolumeLabel{}) +var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&persistentVolumeLabel{}) type persistentVolumeLabel 
struct { *admission.Handler mutex sync.Mutex cloudConfig []byte + sharedInformer informers.SharedInformerFactory awsPVLabeler cloudprovider.PVLabeler gcePVLabeler cloudprovider.PVLabeler azurePVLabeler cloudprovider.PVLabeler @@ -86,6 +91,20 @@ func (l *persistentVolumeLabel) SetCloudConfig(cloudConfig []byte) { l.cloudConfig = cloudConfig } +func (l *persistentVolumeLabel) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + secretInformer := f.Core().V1().Secrets() + l.sharedInformer = f + l.SetReadyFunc(secretInformer.Informer().HasSynced) +} + +// ValidateInitialization ensures lister is set. +func (l *persistentVolumeLabel) ValidateInitialization() error { + if l.sharedInformer == nil { + return fmt.Errorf("missing shared informer") + } + return nil +} + func nodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []api.NodeSelectorRequirement, terms []api.NodeSelectorTerm) bool { for _, req := range reqs { for _, term := range terms { @@ -380,6 +399,11 @@ func (l *persistentVolumeLabel) getOpenStackPVLabeler() (cloudprovider.PVLabeler return nil, err } + cloudProviderWithInformer, ok := cloudProvider.(cloudprovider.InformerUser) + if ok { + cloudProviderWithInformer.SetInformers(l.sharedInformer) + } + openStackPVLabeler, ok := cloudProvider.(cloudprovider.PVLabeler) if !ok { return nil, errors.New("OpenStack cloud provider does not implement PV labeling") diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD index f72675fb0cca..0fdede5c377e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD @@ -25,6 +25,7 @@ go_library( "health_handlers.go", "namespace.go", "protocol.go", + "request_timeout.go", "resource_quota.go", "server_version.go", "table_conversion.go", @@ -75,6 +76,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/apihelpers:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/flowcontrol.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/flowcontrol.go index 2ba7512d014a..3b3f71a05802 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/flowcontrol.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/flowcontrol.go @@ -19,6 +19,7 @@ package apimachinery import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -32,39 +33,60 @@ import ( flowcontrol "k8s.io/api/flowcontrol/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/util/apihelpers" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" clientsideflowcontrol "k8s.io/client-go/util/flowcontrol" "k8s.io/kubernetes/test/e2e/framework" ) const ( - requestConcurrencyLimitMetricName = "apiserver_flowcontrol_request_concurrency_limit" - requestConcurrencyLimitMetricLabelName = "priority_level" + requestConcurrencyLimitMetricName = "apiserver_flowcontrol_request_concurrency_limit" + priorityLevelLabelName = "priority_level" +) + +var ( + errPriorityLevelNotFound = errors.New("cannot find a metric sample with a matching priority level name label") ) var _ = SIGDescribe("API priority and fairness", func() { f := framework.NewDefaultFramework("apf") - ginkgo.It("should ensure that requests can be classified by testing flow-schemas/priority-levels", func() { + ginkgo.It("should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration", func() { testingFlowSchemaName := 
"e2e-testing-flowschema" testingPriorityLevelName := "e2e-testing-prioritylevel" matchingUsername := "noxu" nonMatchingUsername := "foo" - ginkgo.By("creating a testing prioritylevel") + ginkgo.By("creating a testing PriorityLevelConfiguration object") createdPriorityLevel, cleanup := createPriorityLevel(f, testingPriorityLevelName, 1) defer cleanup() - ginkgo.By("creating a testing flowschema") + ginkgo.By("creating a testing FlowSchema object") createdFlowSchema, cleanup := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername}) defer cleanup() - ginkgo.By("checking response headers contain flow-schema/priority-level uid") - if !testResponseHeaderMatches(f, matchingUsername, string(createdPriorityLevel.UID), string(createdFlowSchema.UID)) { - framework.Failf("matching user doesnt received UID for the testing priority-level and flow-schema") + ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") + waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName) + + var response *http.Response + ginkgo.By("response headers should contain the UID of the appropriate FlowSchema and PriorityLevelConfiguration for a matching user") + response = makeRequest(f, matchingUsername) + if plUIDWant, plUIDGot := string(createdPriorityLevel.UID), getPriorityLevelUID(response); plUIDWant != plUIDGot { + framework.Failf("expected PriorityLevelConfiguration UID in the response header: %s, but got: %s, response header: %#v", plUIDWant, plUIDGot, response.Header) + } + if fsUIDWant, fsUIDGot := string(createdFlowSchema.UID), getFlowSchemaUID(response); fsUIDWant != fsUIDGot { + framework.Failf("expected FlowSchema UID in the response header: %s, but got: %s, response header: %#v", fsUIDWant, fsUIDGot, response.Header) + } + + ginkgo.By("response headers should contain non-empty UID of FlowSchema and PriorityLevelConfiguration for a non-matching user") + response = makeRequest(f, 
nonMatchingUsername) + if plUIDGot := getPriorityLevelUID(response); plUIDGot == "" { + framework.Failf("expected a non-empty PriorityLevelConfiguration UID in the response header, but got: %s, response header: %#v", plUIDGot, response.Header) } - if testResponseHeaderMatches(f, nonMatchingUsername, string(createdPriorityLevel.UID), string(createdPriorityLevel.UID)) { - framework.Failf("non-matching user unexpectedly received UID for the testing priority-level and flow-schema") + if fsUIDGot := getFlowSchemaUID(response); fsUIDGot == "" { + framework.Failf("expected a non-empty FlowSchema UID in the response header but got: %s, response header: %#v", fsUIDGot, response.Header) } }) @@ -115,11 +137,15 @@ var _ = SIGDescribe("API priority and fairness", func() { framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName) _, cleanup = createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username}) defer cleanup() + + ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") + waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName) } ginkgo.By("getting request concurrency from metrics") for i := range clients { - realConcurrency := getPriorityLevelConcurrency(f, clients[i].priorityLevelName) + realConcurrency, err := getPriorityLevelConcurrency(f.ClientSet, clients[i].priorityLevelName) + framework.ExpectNoError(err) clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) if clients[i].concurrency < 1 { clients[i].concurrency = 1 @@ -174,6 +200,9 @@ var _ = SIGDescribe("API priority and fairness", func() { _, cleanup = createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName}) defer cleanup() + ginkgo.By("waiting for testing flow schema and priority level to reach steady state") + waitForSteadyState(f, flowSchemaName, priorityLevelName) + type 
client struct { username string qps float64 @@ -188,7 +217,8 @@ var _ = SIGDescribe("API priority and fairness", func() { } framework.Logf("getting real concurrency") - realConcurrency := getPriorityLevelConcurrency(f, priorityLevelName) + realConcurrency, err := getPriorityLevelConcurrency(f.ClientSet, priorityLevelName) + framework.ExpectNoError(err) for i := range clients { clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) if clients[i].concurrency < 1 { @@ -248,10 +278,11 @@ func createPriorityLevel(f *framework.Framework, priorityLevelName string, assur } } -//lint:ignore U1000 function is actually referenced -func getPriorityLevelConcurrency(f *framework.Framework, priorityLevelName string) int32 { - resp, err := f.ClientSet.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(context.TODO()) - framework.ExpectNoError(err) +func getPriorityLevelConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) { + resp, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(context.TODO()) + if err != nil { + return 0, err + } sampleDecoder := expfmt.SampleDecoder{ Dec: expfmt.NewDecoder(bytes.NewBuffer(resp), expfmt.FmtText), Opts: &expfmt.DecodeOptions{}, @@ -259,22 +290,23 @@ func getPriorityLevelConcurrency(f *framework.Framework, priorityLevelName strin for { var v model.Vector err := sampleDecoder.Decode(&v) - if err == io.EOF { - break + if err != nil { + if err == io.EOF { + break + } + return 0, err } - framework.ExpectNoError(err) for _, metric := range v { if string(metric.Metric[model.MetricNameLabel]) != requestConcurrencyLimitMetricName { continue } - if string(metric.Metric[requestConcurrencyLimitMetricLabelName]) != priorityLevelName { + if string(metric.Metric[priorityLevelLabelName]) != priorityLevelName { continue } - return int32(metric.Value) + return int32(metric.Value), nil } } - framework.ExpectNoError(fmt.Errorf("cannot find metric %q with matching priority level name 
label %q", requestConcurrencyLimitMetricName, priorityLevelName)) - return 0 + return 0, errPriorityLevelNotFound } // createFlowSchema creates a flow schema referring to a particular priority @@ -324,6 +356,35 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre } } +// waitForSteadyState repeatedly polls the API server to check if the newly +// created flow schema and priority level have been seen by the APF controller +// by checking: (1) the dangling priority level reference condition in the flow +// schema status, and (2) metrics. The function times out after 30 seconds. +func waitForSteadyState(f *framework.Framework, flowSchemaName string, priorityLevelName string) { + framework.ExpectNoError(wait.Poll(time.Second, 30*time.Second, func() (bool, error) { + fs, err := f.ClientSet.FlowcontrolV1beta1().FlowSchemas().Get(context.TODO(), flowSchemaName, metav1.GetOptions{}) + if err != nil { + return false, err + } + condition := apihelpers.GetFlowSchemaConditionByType(fs, flowcontrol.FlowSchemaConditionDangling) + if condition == nil || condition.Status != flowcontrol.ConditionFalse { + // The absence of the dangling status object implies that the APF + // controller isn't done with syncing the flow schema object. And, of + // course, the condition being anything but false means that steady state + // hasn't been achieved. + return false, nil + } + _, err = getPriorityLevelConcurrency(f.ClientSet, priorityLevelName) + if err != nil { + if err == errPriorityLevelNotFound { + return false, nil + } + return false, err + } + return true, nil + })) +} + // makeRequests creates a request to the API server and returns the response. 
func makeRequest(f *framework.Framework, username string) *http.Response { config := f.ClientConfig() @@ -341,15 +402,12 @@ func makeRequest(f *framework.Framework, username string) *http.Response { return response } -func testResponseHeaderMatches(f *framework.Framework, impersonatingUser, plUID, fsUID string) bool { - response := makeRequest(f, impersonatingUser) - if response.Header.Get(flowcontrol.ResponseHeaderMatchedFlowSchemaUID) != fsUID { - return false - } - if response.Header.Get(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID) != plUID { - return false - } - return true +func getPriorityLevelUID(response *http.Response) string { + return response.Header.Get(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID) +} + +func getFlowSchemaUID(response *http.Response) string { + return response.Header.Get(flowcontrol.ResponseHeaderMatchedFlowSchemaUID) } // uniformQPSLoadSingle loads the API server with requests at a uniform diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/request_timeout.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/request_timeout.go new file mode 100644 index 000000000000..c6e1b4138350 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/request_timeout.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apimachinery + +import ( + "io/ioutil" + "net/http" + "strings" + + "github.com/onsi/ginkgo" + "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + invalidTimeoutMessageExpected = "invalid timeout specified in the request URL" +) + +var _ = SIGDescribe("Server request timeout", func() { + f := framework.NewDefaultFramework("request-timeout") + + ginkgo.It("should return HTTP status code 400 if the user specifies an invalid timeout in the request URL", func() { + rt := getRoundTripper(f) + req := newRequest(f, "invalid") + + response, err := rt.RoundTrip(req) + framework.ExpectNoError(err) + defer response.Body.Close() + + if response.StatusCode != http.StatusBadRequest { + framework.Failf("expected HTTP status code: %d, but got: %d", http.StatusBadRequest, response.StatusCode) + } + + messageGot := readBody(response) + if !strings.Contains(messageGot, invalidTimeoutMessageExpected) { + framework.Failf("expected HTTP status message to contain: %s, but got: %s", invalidTimeoutMessageExpected, messageGot) + } + }) + + ginkgo.It("the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed", func() { + rt := getRoundTripper(f) + // Choose a timeout that exceeds the default timeout (60s) enforced by the apiserver + req := newRequest(f, "3m") + + response, err := rt.RoundTrip(req) + framework.ExpectNoError(err) + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + framework.Failf("expected HTTP status code: %d, but got: %d", http.StatusOK, response.StatusCode) + } + }) + + ginkgo.It("default timeout should be used if the specified timeout in the request URL is 0s", func() { + rt := getRoundTripper(f) + req := newRequest(f, "0s") + + response, err := rt.RoundTrip(req) + framework.ExpectNoError(err) + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + framework.Failf("expected HTTP status code: %d, but got: 
%d", http.StatusOK, response.StatusCode) + } + }) +}) + +func getRoundTripper(f *framework.Framework) http.RoundTripper { + config := rest.CopyConfig(f.ClientConfig()) + + // ensure we don't enforce any transport timeout from the client side. + config.Timeout = 0 + + roundTripper, err := rest.TransportFor(config) + framework.ExpectNoError(err) + + return roundTripper +} + +func newRequest(f *framework.Framework, timeout string) *http.Request { + req, err := http.NewRequest(http.MethodGet, f.ClientSet.CoreV1().RESTClient().Get(). + Param("timeout", timeout).AbsPath("version").URL().String(), nil) + framework.ExpectNoError(err) + + return req +} + +func readBody(response *http.Response) string { + raw, err := ioutil.ReadAll(response.Body) + framework.ExpectNoError(err) + + return string(raw) +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go index 3e0571c9e694..db79f6f989ba 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go @@ -338,8 +338,8 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Ensuring resource quota status captures configMap creation") usedResources = v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) - // we expect there to be two configmaps because each namespace will receive - // a ca.crt configmap by default. + // we expect there to be three configmaps because each namespace will receive + // a ca.crt configmap and an openshift-service-ca.crt configmap by default. 
// ref:https://github.com/kubernetes/kubernetes/pull/68812 usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) @@ -1478,7 +1478,10 @@ func newTestResourceQuota(name string) *v1.ResourceQuota { hard[v1.ResourceQuotas] = resource.MustParse("1") hard[v1.ResourceCPU] = resource.MustParse("1") hard[v1.ResourceMemory] = resource.MustParse("500Mi") - hard[v1.ResourceConfigMaps] = resource.MustParse("2") + // Update configmap quota from 2 to 3 to account for the addition of + // openshift-service-ca.crt configmap to every namespace to allow + // BoundServiceAccountTokenVolume to be enabled in 4.8. + hard[v1.ResourceConfigMaps] = resource.MustParse("3") hard[v1.ResourceSecrets] = resource.MustParse("10") hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10") hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi") diff --git a/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go b/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go index 3d59714d20b9..6c772a34bc99 100644 --- a/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go +++ b/vendor/k8s.io/legacy-cloud-providers/openstack/openstack_volumes.go @@ -739,16 +739,16 @@ func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo return nil, nil } - // Get metadata - md, err := getMetadata(os.metadataOpts.SearchOrder) + // Get Volume + volume, err := os.getVolume(pv.Spec.Cinder.VolumeID) if err != nil { return nil, err } // Construct Volume Labels labels := make(map[string]string) - if md.AvailabilityZone != "" { - labels[v1.LabelFailureDomainBetaZone] = md.AvailabilityZone + if volume.AvailabilityZone != "" { + labels[v1.LabelFailureDomainBetaZone] = volume.AvailabilityZone } if os.region != "" { labels[v1.LabelFailureDomainBetaRegion] = os.region diff --git 
a/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go new file mode 100644 index 000000000000..603ecde9b751 --- /dev/null +++ b/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go @@ -0,0 +1,209 @@ +// +build !providerless + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog/v2" + "k8s.io/legacy-cloud-providers/vsphere/vclib" +) + +type sharedDatastore struct { + nodeManager *NodeManager + candidateDatastores []*vclib.DatastoreInfo +} + +type hostInfo struct { + hostUUID string + hostMOID string + datacenter string +} + +const ( + summary = "summary" + runtimeHost = "summary.runtime.host" + hostsProperty = "host" + nameProperty = "name" +) + +func (shared *sharedDatastore) getSharedDatastore(ctcx context.Context) (*vclib.DatastoreInfo, error) { + nodes := shared.nodeManager.getNodes() + + // Segregate nodes according to VC-DC + dcNodes := make(map[string][]NodeInfo) + nodeHosts := make(map[string]hostInfo) + + for nodeName, node := range nodes { + nodeInfo, err := shared.nodeManager.GetNodeInfoWithNodeObject(node) + if err != nil { + return nil, fmt.Errorf("unable to find node %s: %v", nodeName, err) + } + vcDC := 
nodeInfo.vcServer + nodeInfo.dataCenter.String() + dcNodes[vcDC] = append(dcNodes[vcDC], nodeInfo) + } + + for vcDC, nodes := range dcNodes { + var hostInfos []hostInfo + var err error + hostInfos, err = shared.getNodeHosts(ctcx, nodes, vcDC) + if err != nil { + if vclib.IsManagedObjectNotFoundError(err) { + klog.Warningf("SharedHost.getSharedDatastore: batch fetching of hosts failed - switching to fetching them individually.") + hostInfos, err = shared.getEachNodeHost(ctcx, nodes, vcDC) + if err != nil { + klog.Errorf("SharedHost.getSharedDatastore: error fetching node hosts individually: %v", err) + return nil, err + } + } else { + return nil, err + } + } + for _, host := range hostInfos { + hostDCName := fmt.Sprintf("%s/%s", host.datacenter, host.hostMOID) + nodeHosts[hostDCName] = host + } + } + + if len(nodeHosts) < 1 { + msg := fmt.Sprintf("SharedHost.getSharedDatastore unable to find hosts associated with nodes") + klog.Error(msg) + return nil, fmt.Errorf("") + } + + for _, datastoreInfo := range shared.candidateDatastores { + dataStoreHosts, err := shared.getAttachedHosts(ctcx, datastoreInfo.Datastore) + if err != nil { + msg := fmt.Sprintf("error finding attached hosts to datastore %s: %v", datastoreInfo.Name(), err) + klog.Error(msg) + return nil, fmt.Errorf(msg) + } + if shared.isIncluded(dataStoreHosts, nodeHosts) { + return datastoreInfo, nil + } + } + return nil, fmt.Errorf("SharedHost.getSharedDatastore: unable to find any shared datastores") +} + +// check if all of the nodeHosts are included in the dataStoreHosts +func (shared *sharedDatastore) isIncluded(dataStoreHosts []hostInfo, nodeHosts map[string]hostInfo) bool { + result := true + for _, host := range nodeHosts { + hostFound := false + for _, targetHost := range dataStoreHosts { + if host.hostUUID == targetHost.hostUUID && host.hostMOID == targetHost.hostMOID { + hostFound = true + } + } + if !hostFound { + result = false + } + } + return result +} + +func (shared *sharedDatastore) 
getEachNodeHost(ctx context.Context, nodes []NodeInfo, dcVC string) ([]hostInfo, error) { + var hosts []hostInfo + for _, node := range nodes { + host, err := node.vm.GetHost(ctx) + if err != nil { + klog.Errorf("SharedHost.getEachNodeHost: unable to find host for vm %s: %v", node.vm.InventoryPath, err) + return nil, err + } + hosts = append(hosts, hostInfo{ + hostUUID: host.Summary.Hardware.Uuid, + hostMOID: host.Summary.Host.String(), + datacenter: node.dataCenter.String(), + }) + } + return hosts, nil +} + +func (shared *sharedDatastore) getNodeHosts(ctx context.Context, nodes []NodeInfo, dcVC string) ([]hostInfo, error) { + var vmRefs []types.ManagedObjectReference + if len(nodes) < 1 { + return nil, fmt.Errorf("no nodes found for dc-vc: %s", dcVC) + } + var nodeInfo NodeInfo + for _, n := range nodes { + nodeInfo = n + vmRefs = append(vmRefs, n.vm.Reference()) + } + pc := property.DefaultCollector(nodeInfo.dataCenter.Client()) + var vmoList []mo.VirtualMachine + err := pc.Retrieve(ctx, vmRefs, []string{nameProperty, runtimeHost}, &vmoList) + if err != nil { + klog.Errorf("SharedHost.getNodeHosts: unable to fetch vms from datacenter %s: %w", nodeInfo.dataCenter.String(), err) + return nil, err + } + var hostMoList []mo.HostSystem + var hostRefs []types.ManagedObjectReference + for _, vmo := range vmoList { + if vmo.Summary.Runtime.Host == nil { + msg := fmt.Sprintf("SharedHost.getNodeHosts: no host associated with vm %s", vmo.Name) + klog.Error(msg) + return nil, fmt.Errorf(msg) + } + hostRefs = append(hostRefs, vmo.Summary.Runtime.Host.Reference()) + } + pc = property.DefaultCollector(nodeInfo.dataCenter.Client()) + err = pc.Retrieve(ctx, hostRefs, []string{summary}, &hostMoList) + if err != nil { + klog.Errorf("SharedHost.getNodeHosts: unable to fetch hosts from datacenter %s: %w", nodeInfo.dataCenter.String(), err) + return nil, err + } + var hosts []hostInfo + for _, host := range hostMoList { + hosts = append(hosts, hostInfo{hostMOID: 
host.Summary.Host.String(), hostUUID: host.Summary.Hardware.Uuid, datacenter: nodeInfo.dataCenter.String()}) + } + return hosts, nil +} + +func (shared *sharedDatastore) getAttachedHosts(ctx context.Context, datastore *vclib.Datastore) ([]hostInfo, error) { + var ds mo.Datastore + + pc := property.DefaultCollector(datastore.Client()) + err := pc.RetrieveOne(ctx, datastore.Reference(), []string{hostsProperty}, &ds) + if err != nil { + return nil, err + } + + mounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount) + var refs []types.ManagedObjectReference + for _, host := range ds.Host { + refs = append(refs, host.Key) + mounts[host.Key] = host + } + + var hs []mo.HostSystem + err = pc.Retrieve(ctx, refs, []string{summary}, &hs) + if err != nil { + return nil, err + } + var hosts []hostInfo + for _, h := range hs { + hosts = append(hosts, hostInfo{hostUUID: h.Summary.Hardware.Uuid, hostMOID: h.Summary.Host.String()}) + } + return hosts, nil + +} diff --git a/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/virtualmachine.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/virtualmachine.go index 7fc6f1750dbe..a5e170438ad1 100644 --- a/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/virtualmachine.go +++ b/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib/virtualmachine.go @@ -166,6 +166,24 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol return diskUUID, nil } +// GetHost returns host of the virtual machine +func (vm *VirtualMachine) GetHost(ctx context.Context) (mo.HostSystem, error) { + host, err := vm.HostSystem(ctx) + var hostSystemMo mo.HostSystem + if err != nil { + klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) + return hostSystemMo, err + } + + s := object.NewSearchIndex(vm.Client()) + err = s.Properties(ctx, host.Reference(), []string{"summary"}, &hostSystemMo) + if err != nil { + klog.Errorf("Failed to retrieve datastores for host: %+v. 
err: %+v", host, err) + return hostSystemMo, err + } + return hostSystemMo, nil +} + // DetachDisk detaches the disk specified by vmDiskPath func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error { device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath) diff --git a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go index 83337adc7c20..874320f013c3 100644 --- a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go +++ b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go @@ -1385,13 +1385,20 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo if len(zonesToSearch) == 0 { // If zone is not provided, get the shared datastore across all node VMs. klog.V(4).Infof("Validating if datastore %s is shared across all node VMs", datastoreName) - sharedDsList, err = getSharedDatastoresInK8SCluster(ctx, vs.nodeManager) + sharedDSFinder := &sharedDatastore{ + nodeManager: vs.nodeManager, + candidateDatastores: candidateDatastoreInfos, + } + datastoreInfo, err = sharedDSFinder.getSharedDatastore(ctx) if err != nil { klog.Errorf("Failed to get shared datastore: %+v", err) return "", err } - // Prepare error msg to be used later, if required. - err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastoreName) + if datastoreInfo == nil { + err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastoreName) + klog.Error(err) + return "", err + } } else { // If zone is provided, get the shared datastores in that zone. klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastoreName, zonesToSearch) @@ -1400,21 +1407,19 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", zonesToSearch, err) return "", err } - // Prepare error msg to be used later, if required. 
- err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastoreName, zonesToSearch) - } - found := false - // Check if the selected datastore belongs to the list of shared datastores computed. - for _, sharedDs := range sharedDsList { - if datastoreInfo, found = candidateDatastores[sharedDs.Info.Url]; found { - klog.V(4).Infof("Datastore validation succeeded") - found = true - break + found := false + for _, sharedDs := range sharedDsList { + if datastoreInfo, found = candidateDatastores[sharedDs.Info.Url]; found { + klog.V(4).Infof("Datastore validation succeeded") + found = true + break + } + } + if !found { + err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastoreName, zonesToSearch) + klog.Error(err) + return "", err } - } - if !found { - klog.Error(err) - return "", err } } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 3e53220a3418..37b6997cbf80 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1379,7 +1379,7 @@ gopkg.in/warnings.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gopkg.in/yaml.v3 -# k8s.io/api v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/api v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1427,7 +1427,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/apiextensions-apiserver v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/apiextensions-apiserver/pkg/apihelpers 
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1471,7 +1471,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/apimachinery v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1535,7 +1535,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/apiserver v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1676,7 +1676,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/cli-runtime v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/kustomize @@ -1690,7 +1690,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch 
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/client-go v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/client-go/discovery k8s.io/client-go/discovery/cached @@ -1941,7 +1941,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210701171757-c3d8a1c6948d k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/node/helpers @@ -1949,12 +1949,12 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210701171757-c3d8a1c6948d k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/component-base v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag 
@@ -1977,25 +1977,25 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/term k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/auth/rbac/reconciliation k8s.io/component-helpers/auth/rbac/validation k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/controller-manager v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/controller-manager v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210701171757-c3d8a1c6948d k8s.io/controller-manager/pkg/clientbuilder -# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210701171757-c3d8a1c6948d k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210701171757-c3d8a1c6948d k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/klog/v2 v2.4.0 ## explicit k8s.io/klog/v2 -# k8s.io/kube-aggregator v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator 
v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kube-aggregator v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210701171757-c3d8a1c6948d k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -2039,11 +2039,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210701171757-c3d8a1c6948d k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210701171757-c3d8a1c6948d k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kubectl v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -2067,7 +2067,7 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kubelet v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/kubelet/config/v1alpha1 
k8s.io/kubelet/config/v1beta1 @@ -2079,7 +2079,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.20.0 => github.com/openshift/kubernetes v1.20.1-0.20210205160848-1fb0b6aa7392 +# k8s.io/kubernetes v1.20.0 => github.com/openshift/kubernetes v1.20.1-0.20210701171757-c3d8a1c6948d ## explicit k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -2817,7 +2817,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse -# k8s.io/legacy-cloud-providers v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/legacy-cloud-providers v0.20.0 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210701171757-c3d8a1c6948d ## explicit k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure @@ -2860,7 +2860,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210701171757-c3d8a1c6948d k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 @@ -2869,9 +2869,9 @@ k8s.io/metrics/pkg/apis/external_metrics/v1beta1 k8s.io/metrics/pkg/client/custom_metrics k8s.io/metrics/pkg/client/custom_metrics/scheme 
k8s.io/metrics/pkg/client/external_metrics -# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210701171757-c3d8a1c6948d k8s.io/mount-utils -# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210701171757-c3d8a1c6948d k8s.io/sample-apiserver/pkg/apis/wardle k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 # k8s.io/utils v0.0.0-20201110183641-67b214c5f920 @@ -2929,34 +2929,34 @@ sigs.k8s.io/yaml # github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20201214145556-6f1013f42f98 # github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 # github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b -# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/client-go => 
github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/client-go => 
github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210701171757-c3d8a1c6948d # k8s.io/gengo => k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 # k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 # k8s.io/klog => k8s.io/klog v1.0.0 -# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler 
v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.1-0.20210205160848-1fb0b6aa7392 -# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210205160848-1fb0b6aa7392 -# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210205160848-1fb0b6aa7392 +# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210701171757-c3d8a1c6948d +# 
k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.1-0.20210701171757-c3d8a1c6948d +# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210701171757-c3d8a1c6948d +# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210701171757-c3d8a1c6948d # k8s.io/system-validators => k8s.io/system-validators v1.0.4