Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
b40cd78
Initial code, and removal of reset credentials
jsoriano Dec 24, 2024
47532c3
Assume 410 status gone is ok for elasticsearch
jsoriano Dec 24, 2024
b9e112f
Refactor client tests so they don't try to use the configured client …
jsoriano Dec 24, 2024
a44469d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 26, 2024
cd980a6
Refactor shellinit
jsoriano Dec 26, 2024
5b41cd9
Use API key in stack clients
jsoriano Dec 26, 2024
12aaebe
Ignore errors when getting logs from a non-local elasticsearch
jsoriano Dec 26, 2024
cce94bd
Share logic to start local services
jsoriano Dec 26, 2024
b3b1e76
Fix spaces in logstash config
jsoriano Dec 27, 2024
3797d20
Prepare interfaces to create policies and getting enrollment tokens
jsoriano Dec 27, 2024
04e22d2
Initial enrollment works
jsoriano Dec 27, 2024
8f17940
Tear down
jsoriano Dec 27, 2024
83beb64
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 30, 2024
290c6d9
Fix tear down
jsoriano Dec 30, 2024
be6dd46
Fix system tests
jsoriano Dec 30, 2024
6169e15
Get kibana host directly from the config?
jsoriano Dec 30, 2024
2e12e02
Fix stack up with logstash
jsoriano Dec 30, 2024
f8d1cee
Fix logstash with api keys
jsoriano Dec 30, 2024
9a24380
Better idempotence
jsoriano Dec 30, 2024
c4822eb
Remove unused variable
jsoriano Dec 30, 2024
7295a2e
Revert change in initialization of kibana host
jsoriano Dec 30, 2024
0ec34f2
Implement status for environment provider
jsoriano Dec 31, 2024
5f000c5
Try to support local Fleet Server for remote stacks
jsoriano Jan 2, 2025
0a188b4
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 2, 2025
184209e
Fix certificates on agent deployer
jsoriano Jan 3, 2025
d4d32ac
Fix fleet status when fleet server is locally managed
jsoriano Jan 3, 2025
038549c
Reuse existing fleet server hosts
jsoriano Jan 3, 2025
91f2b2d
Add options for API key in clients
jsoriano Jan 3, 2025
b854ca9
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 3, 2025
0d1a1b2
Merge branch 'api-key-clients' into api-key-support
jsoriano Jan 3, 2025
74f2049
Add host.docker.internal to the local services
jsoriano Jan 3, 2025
bbbc671
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 7, 2025
0095a32
Polish status
jsoriano Jan 7, 2025
f60e15d
Add output id to stack config
jsoriano Jan 7, 2025
0c407a0
Fix error formatting value
jsoriano Jan 7, 2025
f53325d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 8, 2025
dcc5e0b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 13, 2025
c65452b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 14, 2025
ffeb24c
Remove unused API keys
jsoriano Jan 15, 2025
1079df7
Fix issues after merge
jsoriano Jan 15, 2025
699623e
Fix kubernetes agent deployer
jsoriano Jan 17, 2025
699cb0f
Add tech preview warning
jsoriano Jan 17, 2025
52ec637
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 17, 2025
aa71071
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 20, 2025
d728838
Pass context to call to get enrollment tokens
jsoriano Jan 20, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Fix kubernetes agent deployer
  • Loading branch information
jsoriano committed Jan 17, 2025
commit 699623e26c013769735d26c345c338166ecbc7e8
Original file line number Diff line number Diff line change
Expand Up @@ -44,15 +44,15 @@ spec:
value: {{ .fleetURL }}
# If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed
- name: FLEET_ENROLLMENT_TOKEN
value: ""
value: "{{ .enrollmentToken }}"
- name: FLEET_TOKEN_POLICY_NAME
value: "{{ .elasticAgentTokenPolicyName }}"
- name: KIBANA_HOST
value: {{ .kibanaURL }}
- name: KIBANA_FLEET_USERNAME
value: "elastic"
value: {{ .username }}
- name: KIBANA_FLEET_PASSWORD
value: "changeme"
value: {{ .password }}
- name: SSL_CERT_DIR
value: "/etc/ssl/certs:/etc/ssl/elastic-package"
- name: NODE_NAME
Expand Down
47 changes: 39 additions & 8 deletions internal/agentdeployer/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ type kubernetesDeployedAgent struct {
}

func (s kubernetesDeployedAgent) TearDown(ctx context.Context) error {
elasticAgentManagedYaml, err := getElasticAgentYAML(s.profile, s.stackVersion, s.agentInfo.Policy.Name, s.agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(s.profile, s.agentInfo, s.stackVersion, s.agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
Expand Down Expand Up @@ -123,7 +123,7 @@ func (ksd *KubernetesAgentDeployer) SetUp(ctx context.Context, agentInfo AgentIn
if ksd.runTearDown || ksd.runTestsOnly {
logger.Debug("Skip install Elastic Agent in cluster")
} else {
err = installElasticAgentInCluster(ctx, ksd.profile, ksd.stackVersion, agentInfo.Policy.Name, agentName)
err = installElasticAgentInCluster(ctx, ksd.profile, agentInfo, ksd.stackVersion, agentName)
if err != nil {
return nil, fmt.Errorf("can't install Elastic-Agent in the Kubernetes cluster: %w", err)
}
Expand Down Expand Up @@ -155,10 +155,10 @@ func (ksd *KubernetesAgentDeployer) agentName() string {

var _ AgentDeployer = new(KubernetesAgentDeployer)

func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, stackVersion, policyName, agentName string) error {
func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) error {
logger.Debug("install Elastic Agent in the Kubernetes cluster")

elasticAgentManagedYaml, err := getElasticAgentYAML(profile, stackVersion, policyName, agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(profile, agentInfo, stackVersion, agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
Expand All @@ -176,8 +176,36 @@ func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile,
//go:embed _static/elastic-agent-managed.yaml.tmpl
var elasticAgentManagedYamlTmpl string

func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, agentName string) ([]byte, error) {
func getElasticAgentYAML(profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) ([]byte, error) {
logger.Debugf("Prepare YAML definition for Elastic Agent running in stack v%s", stackVersion)
config, err := stack.LoadConfig(profile)
if err != nil {
return nil, fmt.Errorf("failed to load config from profile: %w", err)
}
fleetURL := "https://fleet-server:8220"
kibanaURL := "https://kibana:5601"
if config.Provider != stack.ProviderCompose {
kibanaURL = config.KibanaHost
}
if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
fleetURL = url
}
if version, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
stackVersion = version
}

enrollmentToken := ""
if config.ElasticsearchAPIKey != "" {
// TODO: Review if this is the correct place to get the enrollment token.
kibanaClient, err := stack.NewKibanaClientFromProfile(profile)
if err != nil {
return nil, fmt.Errorf("failed to create kibana client: %w", err)
}
enrollmentToken, err = kibanaClient.GetEnrollmentTokenForPolicyID(context.TODO(), agentInfo.Policy.ID)
if err != nil {
return nil, fmt.Errorf("failed to get enrollment token for policy %q: %w", agentInfo.Policy.Name, err)
}
}

appConfig, err := install.Configuration(install.OptionWithStackVersion(stackVersion))
if err != nil {
Expand All @@ -193,11 +221,14 @@ func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, age

var elasticAgentYaml bytes.Buffer
err = tmpl.Execute(&elasticAgentYaml, map[string]string{
"fleetURL": "https://fleet-server:8220",
"kibanaURL": "https://kibana:5601",
"fleetURL": fleetURL,
"kibanaURL": kibanaURL,
"username": config.ElasticsearchUsername,
"password": config.ElasticsearchPassword,
"enrollmentToken": enrollmentToken,
"caCertPem": caCert,
"elasticAgentImage": appConfig.StackImageRefs().ElasticAgent,
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, policyName),
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, agentInfo.Policy.Name),
"agentName": agentName,
})
if err != nil {
Expand Down
2 changes: 2 additions & 0 deletions internal/kubectl/kubectl_apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,8 @@ func waitForReadyResources(resources []resource) error {
// be unavailable (DaemonSet.spec.updateStrategy.rollingUpdate.maxUnavailable defaults to 1).
// daemonSetReady will return true regardless of the pod not being ready yet.
// Can be solved with multi-node clusters.
// TODO: Support context cancelation in this wait. We rely on a helm waiter
// that doesn't support it.
err := kubeClient.Wait(resList, readinessTimeout)
if err != nil {
return fmt.Errorf("waiter failed: %w", err)
Expand Down
12 changes: 6 additions & 6 deletions internal/testrunner/runners/system/tester.go
Original file line number Diff line number Diff line change
Expand Up @@ -561,18 +561,18 @@ func (r *tester) tearDownTest(ctx context.Context) error {
r.removeAgentHandler = nil
}

if r.deleteTestPolicyHandler != nil {
if err := r.deleteTestPolicyHandler(cleanupCtx); err != nil {
if r.shutdownAgentHandler != nil {
if err := r.shutdownAgentHandler(cleanupCtx); err != nil {
return err
}
r.deleteTestPolicyHandler = nil
r.shutdownAgentHandler = nil
}

if r.shutdownAgentHandler != nil {
if err := r.shutdownAgentHandler(cleanupCtx); err != nil {
if r.deleteTestPolicyHandler != nil {
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Reordering this because the agent deployer needs to get the YAML for the agent, and it now contains an enrollment token, which needs to be retrieved for an existing policy.

We could refactor tear down to not require this token there, but in any case it makes sense to delete the policy after deleting the agents using it.

if err := r.deleteTestPolicyHandler(cleanupCtx); err != nil {
return err
}
r.shutdownAgentHandler = nil
r.deleteTestPolicyHandler = nil
}

return nil
Expand Down