@@ -12,6 +12,7 @@ import (
1212 "github.com/openshift/origin/test/extended/util"
1313 corev1 "k8s.io/api/core/v1"
1414 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
15+ nodeutil "k8s.io/kubernetes/pkg/util/node"
1516 "k8s.io/kubernetes/test/e2e/framework"
1617)
1718
@@ -52,7 +53,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Ensuring both nodes are healthy before starting kubelet disruption test")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, nodeIsHealthyTimeout, pollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be ready before kubelet disruption", node.Name))
 		}
 
@@ -90,7 +96,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Cleanup: Waiting for all nodes to become Ready after constraint cleanup")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after cleanup", node.Name))
 		}
 	}
@@ -112,12 +123,22 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 
 		g.By("Checking that the node is not in state Ready due to kubelet constraint")
 		o.Eventually(func() bool {
-			return !isNodeReady(oc, targetNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), targetNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", targetNode.Name, err)
+				return false
+			}
+			return !nodeutil.IsNodeReady(nodeObj)
 		}, kubeletDisruptionTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s is not in state Ready after kubelet constraint is applied", targetNode.Name))
 
 		g.By(fmt.Sprintf("Ensuring surviving node %s remains Ready during kubelet disruption", survivingNode.Name))
 		o.Consistently(func() bool {
-			return isNodeReady(oc, survivingNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), survivingNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", survivingNode.Name, err)
+				return false
+			}
+			return nodeutil.IsNodeReady(nodeObj)
 		}, 2*time.Minute, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Surviving node %s should remain Ready during kubelet disruption", survivingNode.Name))
 
 		g.By("Validating etcd cluster remains healthy with surviving node")
@@ -131,13 +152,23 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 
 		g.By("Waiting for target node to become Ready after kubelet constraint removal")
 		o.Eventually(func() bool {
-			return isNodeReady(oc, targetNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), targetNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", targetNode.Name, err)
+				return false
+			}
+			return nodeutil.IsNodeReady(nodeObj)
 		}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should become Ready after kubelet constraint removal", targetNode.Name))
 
 		g.By("Validating both nodes are Ready after kubelet constraint removal")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after kubelet constraint removal", node.Name))
 		}
 
@@ -185,7 +216,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Validating both nodes are Ready after kubelet service automatic restart")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after kubelet automatic restart", node.Name))
 		}
 