
Commit 6ea9bee

merge only my changes
1 parent ef11ac7 commit 6ea9bee

2 files changed: 43 additions & 24 deletions


test/extended/two_node/common.go

Lines changed: 0 additions & 17 deletions
@@ -8,7 +8,6 @@ import (
 
 	v1 "github.com/openshift/api/config/v1"
 	exutil "github.com/openshift/origin/test/extended/util"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -188,22 +187,6 @@ func isServiceRunning(oc *exutil.CLI, nodeName string, serviceName string) bool
 	return err == nil
 }
 
-// isNodeReady checks if a node is in Ready state
-func isNodeReady(oc *exutil.CLI, nodeName string) bool {
-	node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
-	if err != nil {
-		framework.Logf("Error getting node %s: %v", nodeName, err)
-		return false
-	}
-
-	for _, condition := range node.Status.Conditions {
-		if condition.Type == corev1.NodeReady {
-			return condition.Status == corev1.ConditionTrue
-		}
-	}
-	return false
-}
-
 // validateClusterOperatorsAvailable ensures all cluster operators are available
 func validateClusterOperatorsAvailable(oc *exutil.CLI) error {
 	clusterOperators, err := oc.AdminConfigClient().ConfigV1().ClusterOperators().List(context.Background(), metav1.ListOptions{})
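
The deleted helper duplicated logic that already exists upstream. For reference, a minimal sketch of the replacement, nodeutil.IsNodeReady from k8s.io/kubernetes/pkg/util/node (an illustration of its behavior, not the verbatim upstream source):

// IsNodeReady reports whether the node's NodeReady condition is True.
// Sketch of the upstream helper that the deleted isNodeReady duplicated.
func IsNodeReady(node *corev1.Node) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

Unlike the deleted helper, it takes an already-fetched *corev1.Node, so the Get call and error logging move to the call sites, as the second file shows.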

test/extended/two_node/tnf_kubelet_disruption.go

Lines changed: 43 additions & 7 deletions
@@ -12,6 +12,7 @@ import (
 	"github.com/openshift/origin/test/extended/util"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	nodeutil "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
@@ -52,7 +53,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Ensuring both nodes are healthy before starting kubelet disruption test")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, nodeIsHealthyTimeout, pollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be ready before kubelet disruption", node.Name))
 		}
 
@@ -90,7 +96,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Cleanup: Waiting for all nodes to become Ready after constraint cleanup")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after cleanup", node.Name))
 		}
 	}
@@ -112,12 +123,22 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 
 		g.By("Checking that the node is not in state Ready due to kubelet constraint")
 		o.Eventually(func() bool {
-			return !isNodeReady(oc, targetNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), targetNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", targetNode.Name, err)
+				return false
+			}
+			return !nodeutil.IsNodeReady(nodeObj)
 		}, kubeletDisruptionTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s is not in state Ready after kubelet constraint is applied", targetNode.Name))
 
 		g.By(fmt.Sprintf("Ensuring surviving node %s remains Ready during kubelet disruption", survivingNode.Name))
 		o.Consistently(func() bool {
-			return isNodeReady(oc, survivingNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), survivingNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", survivingNode.Name, err)
+				return false
+			}
+			return nodeutil.IsNodeReady(nodeObj)
 		}, 2*time.Minute, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Surviving node %s should remain Ready during kubelet disruption", survivingNode.Name))
 
 		g.By("Validating etcd cluster remains healthy with surviving node")
@@ -131,13 +152,23 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 
 		g.By("Waiting for target node to become Ready after kubelet constraint removal")
 		o.Eventually(func() bool {
-			return isNodeReady(oc, targetNode.Name)
+			nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), targetNode.Name, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting node %s: %v", targetNode.Name, err)
+				return false
+			}
+			return nodeutil.IsNodeReady(nodeObj)
 		}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should become Ready after kubelet constraint removal", targetNode.Name))
 
 		g.By("Validating both nodes are Ready after kubelet constraint removal")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after kubelet constraint removal", node.Name))
 		}
 
@@ -185,7 +216,12 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual
 		g.By("Validating both nodes are Ready after kubelet service automatic restart")
 		for _, node := range nodes {
 			o.Eventually(func() bool {
-				return isNodeReady(oc, node.Name)
+				nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", node.Name, err)
+					return false
+				}
+				return nodeutil.IsNodeReady(nodeObj)
 			}, kubeletRestoreTimeout, kubeletPollInterval).Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready after kubelet automatic restart", node.Name))
 		}
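
The same Get-then-check block now repeats at every poll site in this file. If the duplication grows, it could be folded back into a small test-local closure; a hedged sketch (the isNodeReadyFn name is hypothetical and not part of this commit):

// Hypothetical helper, not in this commit: builds a poll function that
// fetches the node and checks readiness via nodeutil.IsNodeReady.
isNodeReadyFn := func(name string) func() bool {
	return func() bool {
		nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Error getting node %s: %v", name, err)
			return false
		}
		return nodeutil.IsNodeReady(nodeObj)
	}
}

// Example call site, mirroring the polls above:
o.Eventually(isNodeReadyFn(node.Name), kubeletRestoreTimeout, kubeletPollInterval).
	Should(o.BeTrue(), fmt.Sprintf("Node %s should be Ready", node.Name))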
