Commit b5fcafc

Reconcile a Basic pgAdmin Deployment

This commit allows for the creation of a pgAdmin deployment. As the initial
implementation of pgAdmin for v5, it reconciles the following:

- a Deployment with a single replica only, using the latest PGO v4
  crunchy-pgadmin image
- a Service for accessing pgAdmin, with a configurable type
- a PVC for storing pgAdmin data

Additionally, the Deployment scales to 0 when the cluster is shut down, and
the following items are configurable: resources, labels, annotations,
affinity, priority classes, and topology spread constraints. Image pull
secrets and policies are also supported.

Please note that the current startup scripts within the crunchy-pgadmin
container are being leveraged, but further integration is required before
pgAdmin can be used for managing PostgresClusters.

Finally, when the pgAdmin section is removed from the spec, all pgAdmin
resources are shut down and removed, including the PVC (retention is based on
the PV configuration).

Issue: [sc12517]
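For orientation, here is a minimal sketch of how a PostgresCluster would opt
in to pgAdmin through the new spec fields. The field names mirror the accesses
made by the reconcile code in this commit; the UserInterfaceSpec and
PGAdminPodSpec type names and the example storage values are assumptions for
illustration, not taken from this diff.

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"

    "github.com/crunchydata/postgres-operator/internal/initialize"
    "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// Sketch only: enable pgAdmin on an existing cluster. The type names
// UserInterfaceSpec and PGAdminPodSpec are assumed; the fields match those
// read by the new reconcilePGAdmin code below.
func enablePGAdmin(cluster *v1beta1.PostgresCluster) {
    cluster.Spec.UserInterface = &v1beta1.UserInterfaceSpec{
        PGAdmin: &v1beta1.PGAdminPodSpec{
            Replicas: initialize.Int32(1),
            DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{
                AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
                Resources: corev1.ResourceRequirements{
                    Requests: corev1.ResourceList{
                        corev1.ResourceStorage: resource.MustParse("1Gi"),
                    },
                },
            },
        },
    }
}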
1 parent ae68551 commit b5fcafc

20 files changed: +4037 −4 lines changed


config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml

Lines changed: 1011 additions & 0 deletions
Large diffs are not rendered by default.

config/manager/manager.yaml

Lines changed: 2 additions & 0 deletions
@@ -22,6 +22,8 @@ spec:
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:centos8-14.0-0"
         - name: RELATED_IMAGE_POSTGRES_14_GIS_3.1
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:centos8-14.0-3.1-0"
+        - name: RELATED_IMAGE_PGADMIN
+          value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:centos8-13.4-4.7.3"
         - name: RELATED_IMAGE_PGBACKREST
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.35-0"
         - name: RELATED_IMAGE_PGBOUNCER

docs/content/references/crd.md

Lines changed: 1445 additions & 0 deletions
Some generated files are not rendered by default.

installers/olm/config/redhat/related-images.yaml

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@ spec:
       containers:
       - name: operator
         env:
+        - { name: RELATED_IMAGE_PGADMIN, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgadmin4:ubi8-13.4-4.7.3' }
         - { name: RELATED_IMAGE_PGBACKREST, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbackrest:ubi8-2.35-0' }
         - { name: RELATED_IMAGE_PGBOUNCER, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbouncer:ubi8-1.15-3' }
         - { name: RELATED_IMAGE_PGEXPORTER, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter:ubi8-5.0.3-0' }

internal/config/config.go

Lines changed: 11 additions & 0 deletions
@@ -45,6 +45,17 @@ func PGBackRestContainerImage(cluster *v1beta1.PostgresCluster) string {
     return defaultFromEnv(image, "RELATED_IMAGE_PGBACKREST")
 }
 
+// PGAdminContainerImage returns the container image to use for pgAdmin.
+func PGAdminContainerImage(cluster *v1beta1.PostgresCluster) string {
+    var image string
+    if cluster.Spec.UserInterface != nil &&
+        cluster.Spec.UserInterface.PGAdmin != nil {
+        image = cluster.Spec.UserInterface.PGAdmin.Image
+    }
+
+    return defaultFromEnv(image, "RELATED_IMAGE_PGADMIN")
+}
+
 // PGBouncerContainerImage returns the container image to use for pgBouncer.
 func PGBouncerContainerImage(cluster *v1beta1.PostgresCluster) string {
     var image string
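The new helper follows the same spec-then-environment precedence as the
existing image functions. defaultFromEnv itself is outside this hunk; a
minimal sketch consistent with the test added below (an empty spec image
defers to RELATED_IMAGE_PGADMIN, a populated spec image wins) would be:

import "os"

// Sketch of defaultFromEnv, which is not shown in this diff: prefer the
// value from the spec and fall back to the named environment variable.
func defaultFromEnv(value, key string) string {
    if value == "" {
        return os.Getenv(key)
    }
    return value
}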

internal/config/config_test.go

Lines changed: 18 additions & 0 deletions
@@ -49,6 +49,24 @@ func unsetEnv(t testing.TB, key string) {
     assert.NilError(t, os.Unsetenv(key))
 }
 
+func TestPGAdminContainerImage(t *testing.T) {
+    cluster := &v1beta1.PostgresCluster{}
+
+    unsetEnv(t, "RELATED_IMAGE_PGADMIN")
+    assert.Equal(t, PGAdminContainerImage(cluster), "")
+
+    setEnv(t, "RELATED_IMAGE_PGADMIN", "")
+    assert.Equal(t, PGAdminContainerImage(cluster), "")
+
+    setEnv(t, "RELATED_IMAGE_PGADMIN", "env-var-pgadmin")
+    assert.Equal(t, PGAdminContainerImage(cluster), "env-var-pgadmin")
+
+    assert.NilError(t, yaml.Unmarshal([]byte(`{
+        userInterface: { pgAdmin: { image: spec-image } },
+    }`), &cluster.Spec))
+    assert.Equal(t, PGAdminContainerImage(cluster), "spec-image")
+}
+
 func TestPGBackRestContainerImage(t *testing.T) {
     cluster := &v1beta1.PostgresCluster{}
 
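The test drives PGAdminContainerImage through the file's setEnv/unsetEnv
helpers; only the tail of unsetEnv appears in the diff context above. A
hypothetical sketch of the setEnv counterpart, assuming it simply wraps
os.Setenv (the real helper may also restore the previous value with
t.Cleanup):

// Hypothetical sketch of setEnv, matching the unsetEnv helper shown above.
func setEnv(t testing.TB, key, value string) {
    t.Helper()
    assert.NilError(t, os.Setenv(key, value))
}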

internal/controller/postgrescluster/controller.go

Lines changed: 3 additions & 2 deletions
@@ -289,8 +289,9 @@ func (r *Reconciler) Reconcile(
     if err == nil {
         err = r.reconcileDatabaseInitSQL(ctx, cluster, instances)
     }
-
-    // TODO reconcile pgadmin4
+    if err == nil {
+        err = r.reconcilePGAdmin(ctx, cluster)
+    }
 
     // at this point everything reconciled successfully, and we can update the
     // observedGeneration
internal/controller/postgrescluster/pgadmin.go

Lines changed: 285 additions & 0 deletions
/*
Copyright 2021 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package postgrescluster

import (
    "context"

    "github.com/pkg/errors"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/crunchydata/postgres-operator/internal/initialize"
    "github.com/crunchydata/postgres-operator/internal/naming"
    "github.com/crunchydata/postgres-operator/internal/pgadmin"
    "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// reconcilePGAdmin writes the objects necessary to run a pgAdmin Pod.
func (r *Reconciler) reconcilePGAdmin(
    ctx context.Context, cluster *v1beta1.PostgresCluster,
) error {

    // TODO(tjmoore4): Currently, the returned service is only used in tests,
    // but it may be useful during upcoming feature enhancements. If not, we
    // may consider removing the service return altogether and refactoring
    // this function to only return errors.
    _, err := r.reconcilePGAdminService(ctx, cluster)
    var dataVolume *corev1.PersistentVolumeClaim
    if err == nil {
        dataVolume, err = r.reconcilePGAdminDataVolume(ctx, cluster)
    }
    if err == nil {
        err = r.reconcilePGAdminDeployment(ctx, cluster, dataVolume)
    }
    return err
}

// generatePGAdminService returns a v1.Service that exposes pgAdmin pods.
// The ServiceType comes from the cluster user interface spec.
func (r *Reconciler) generatePGAdminService(
    cluster *v1beta1.PostgresCluster) (*corev1.Service, bool, error,
) {
    service := &corev1.Service{ObjectMeta: naming.ClusterPGAdmin(cluster)}
    service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service"))

    if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil {
        return service, false, nil
    }

    service.Annotations = naming.Merge(
        cluster.Spec.Metadata.GetAnnotationsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil())
    service.Labels = naming.Merge(
        cluster.Spec.Metadata.GetLabelsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(),
        map[string]string{
            naming.LabelCluster: cluster.Name,
            naming.LabelRole:    naming.RolePGAdmin,
        })

    // Allocate an IP address and/or node port and let Kubernetes manage the
    // Endpoints by selecting Pods with the pgAdmin role.
    // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service
    service.Spec.Selector = map[string]string{
        naming.LabelCluster: cluster.Name,
        naming.LabelRole:    naming.RolePGAdmin,
    }
    if spec := cluster.Spec.UserInterface.PGAdmin.Service; spec != nil {
        service.Spec.Type = corev1.ServiceType(spec.Type)
    } else {
        service.Spec.Type = corev1.ServiceTypeClusterIP
    }

    // The TargetPort must be the name (not the number) of the pgAdmin
    // ContainerPort. This name allows the port number to differ between Pods,
    // which can happen during a rolling update.
    //
    // TODO(tjmoore4): A custom service port is not currently supported as this
    // requires updates to the pgAdmin service configuration, but the spec
    // structures are in place to facilitate further enhancement.
    service.Spec.Ports = []corev1.ServicePort{{
        Name:       naming.PortPGAdmin,
        Port:       *cluster.Spec.UserInterface.PGAdmin.Port,
        Protocol:   corev1.ProtocolTCP,
        TargetPort: intstr.FromString(naming.PortPGAdmin),
    }}

    err := errors.WithStack(r.setControllerReference(cluster, service))

    return service, true, err
}

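Because TargetPort above is a name rather than a number, the pod template
assembled by pgadmin.Pod must declare a container port with that same name for
the Service to resolve. A sketch of the container-side declaration this
implies (the actual container is built in the internal/pgadmin package, not in
this file):

// Sketch: a named container port that the Service's TargetPort can resolve,
// matching naming.PortPGAdmin used in generatePGAdminService above.
func pgAdminContainerPorts(cluster *v1beta1.PostgresCluster) []corev1.ContainerPort {
    return []corev1.ContainerPort{{
        Name:          naming.PortPGAdmin,
        ContainerPort: *cluster.Spec.UserInterface.PGAdmin.Port,
        Protocol:      corev1.ProtocolTCP,
    }}
}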
// +kubebuilder:rbac:groups="",resources="services",verbs={get}
// +kubebuilder:rbac:groups="",resources="services",verbs={create,delete,patch}

// reconcilePGAdminService writes the Service that resolves to pgAdmin.
func (r *Reconciler) reconcilePGAdminService(
    ctx context.Context, cluster *v1beta1.PostgresCluster,
) (*corev1.Service, error) {
    service, specified, err := r.generatePGAdminService(cluster)

    if err == nil && !specified {
        // pgAdmin is disabled; delete the Service if it exists. Check the client
        // cache first using Get.
        key := client.ObjectKeyFromObject(service)
        err := errors.WithStack(r.Client.Get(ctx, key, service))
        if err == nil {
            err = errors.WithStack(r.deleteControlled(ctx, cluster, service))
        }
        return nil, client.IgnoreNotFound(err)
    }

    if err == nil {
        err = errors.WithStack(r.apply(ctx, service))
    }
    return service, err
}

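The disabled branch here (Get against the client cache, deleteControlled,
IgnoreNotFound) reappears for the Deployment and PVC below. As a generic
sketch of that recurring shape (deleteIfExists is a hypothetical name, not a
helper in this commit):

// Sketch of the recurring "delete if present" pattern: read through the
// client cache first so a missing object costs nothing, and treat NotFound
// as success since the goal is absence.
func (r *Reconciler) deleteIfExists(
    ctx context.Context, cluster *v1beta1.PostgresCluster, obj client.Object,
) error {
    err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(obj), obj))
    if err == nil {
        err = errors.WithStack(r.deleteControlled(ctx, cluster, obj))
    }
    return client.IgnoreNotFound(err)
}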
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=create;delete;patch

// reconcilePGAdminDeployment writes the Deployment that runs pgAdmin.
func (r *Reconciler) reconcilePGAdminDeployment(
    ctx context.Context, cluster *v1beta1.PostgresCluster,
    dataVolume *corev1.PersistentVolumeClaim,
) error {
    deploy := &appsv1.Deployment{ObjectMeta: naming.ClusterPGAdmin(cluster)}
    deploy.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment"))

    if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil {
        // pgAdmin is disabled; delete the Deployment if it exists. Check the
        // client cache first using Get.
        key := client.ObjectKeyFromObject(deploy)
        err := errors.WithStack(r.Client.Get(ctx, key, deploy))
        if err == nil {
            err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy))
        }
        return client.IgnoreNotFound(err)
    }

    deploy.Annotations = naming.Merge(
        cluster.Spec.Metadata.GetAnnotationsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil())
    deploy.Labels = naming.Merge(
        cluster.Spec.Metadata.GetLabelsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(),
        map[string]string{
            naming.LabelCluster: cluster.Name,
            naming.LabelRole:    naming.RolePGAdmin,
            naming.LabelData:    naming.DataPGAdmin,
        })
    deploy.Spec.Selector = &metav1.LabelSelector{
        MatchLabels: map[string]string{
            naming.LabelCluster: cluster.Name,
            naming.LabelRole:    naming.RolePGAdmin,
        },
    }
    deploy.Spec.Template.Annotations = naming.Merge(
        cluster.Spec.Metadata.GetAnnotationsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil())
    deploy.Spec.Template.Labels = naming.Merge(
        cluster.Spec.Metadata.GetLabelsOrNil(),
        cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(),
        map[string]string{
            naming.LabelCluster: cluster.Name,
            naming.LabelRole:    naming.RolePGAdmin,
            naming.LabelData:    naming.DataPGAdmin,
        })

    // if the shutdown flag is set, set pgAdmin replicas to 0
    if cluster.Spec.Shutdown != nil && *cluster.Spec.Shutdown {
        deploy.Spec.Replicas = initialize.Int32(0)
    } else {
        deploy.Spec.Replicas = cluster.Spec.UserInterface.PGAdmin.Replicas
    }

    // Don't clutter the namespace with extra ReplicaSets.
    deploy.Spec.RevisionHistoryLimit = initialize.Int32(0)

    // Ensure that the number of Ready pods is never less than the specified
    // Replicas by starting new pods while old pods are still running.
    // - https://docs.k8s.io/concepts/workloads/controllers/deployment/#rolling-update-deployment
    deploy.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType
    deploy.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
        MaxUnavailable: intstr.ValueOrDefault(nil, intstr.FromInt(0)),
    }

    // Use scheduling constraints from the cluster spec.
    deploy.Spec.Template.Spec.Affinity = cluster.Spec.UserInterface.PGAdmin.Affinity
    deploy.Spec.Template.Spec.Tolerations = cluster.Spec.UserInterface.PGAdmin.Tolerations

    if cluster.Spec.UserInterface.PGAdmin.PriorityClassName != nil {
        deploy.Spec.Template.Spec.PriorityClassName = *cluster.Spec.UserInterface.PGAdmin.PriorityClassName
    }

    deploy.Spec.Template.Spec.TopologySpreadConstraints =
        cluster.Spec.UserInterface.PGAdmin.TopologySpreadConstraints

    // Restart containers any time they stop, die, are killed, etc.
    // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy
    deploy.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways

    // pgAdmin does not make any Kubernetes API calls. Use the default
    // ServiceAccount and do not mount its credentials.
    deploy.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false)

    deploy.Spec.Template.Spec.SecurityContext = initialize.RestrictedPodSecurityContext()

    // set the image pull secrets, if any exist
    deploy.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets

    err := errors.WithStack(r.setControllerReference(cluster, deploy))

    if err == nil {
        pgadmin.Pod(cluster, &deploy.Spec.Template.Spec, dataVolume)
    }
    if err == nil {
        err = errors.WithStack(r.apply(ctx, deploy))
    }

    return err
}

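Several Deployment fields above (Replicas, RevisionHistoryLimit,
AutomountServiceAccountToken) are pointers so that Kubernetes can distinguish
"unset" from "zero"; the internal/initialize package supplies the small
pointer helpers used here. A sketch of the two used in this function:

package initialize

// Int32 returns a pointer to v, as used for Replicas and RevisionHistoryLimit.
func Int32(v int32) *int32 { return &v }

// Bool returns a pointer to v, as used for AutomountServiceAccountToken.
func Bool(v bool) *bool { return &v }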
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=create;patch

// reconcilePGAdminDataVolume writes the PersistentVolumeClaim for the
// instance's pgAdmin data volume.
func (r *Reconciler) reconcilePGAdminDataVolume(
    ctx context.Context, cluster *v1beta1.PostgresCluster,
) (*corev1.PersistentVolumeClaim, error) {

    labelMap := map[string]string{
        naming.LabelCluster: cluster.Name,
        naming.LabelRole:    naming.RolePGAdmin,
        naming.LabelData:    naming.DataPGAdmin,
    }

    pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterPGAdmin(cluster)}
    pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"))

    if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil {
        // pgAdmin is disabled; delete the PVC if it exists. Check the client
        // cache first using Get.
        key := client.ObjectKeyFromObject(pvc)
        err := errors.WithStack(r.Client.Get(ctx, key, pvc))
        if err == nil {
            err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc))
        }
        return nil, client.IgnoreNotFound(err)
    }

    pvc.Annotations = naming.Merge(
        cluster.Spec.Metadata.GetAnnotationsOrNil(),
    )
    pvc.Labels = naming.Merge(
        cluster.Spec.Metadata.GetLabelsOrNil(),
        labelMap,
    )
    pvc.Spec = cluster.Spec.UserInterface.PGAdmin.DataVolumeClaimSpec

    err := errors.WithStack(r.setControllerReference(cluster, pvc))

    if err == nil {
        err = r.handlePersistentVolumeClaimError(cluster,
            errors.WithStack(r.apply(ctx, pvc)))
    }

    return pvc, err
}
