Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
b40cd78
Initial code, and removal of reset credentials
jsoriano Dec 24, 2024
47532c3
Assume 410 status gone is ok for elasticsearch
jsoriano Dec 24, 2024
b9e112f
Refactor client tests so they don't try to use the configured client …
jsoriano Dec 24, 2024
a44469d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 26, 2024
cd980a6
Refactor shellinit
jsoriano Dec 26, 2024
5b41cd9
Use API key in stack clients
jsoriano Dec 26, 2024
12aaebe
Ignore errors when getting logs from a non-local elasticsearch
jsoriano Dec 26, 2024
cce94bd
Share logic to start local services
jsoriano Dec 26, 2024
b3b1e76
Fix spaces in logstash config
jsoriano Dec 27, 2024
3797d20
Prepare interfaces to create policies and getting enrollment tokens
jsoriano Dec 27, 2024
04e22d2
Initial enrollment works
jsoriano Dec 27, 2024
8f17940
Tear down
jsoriano Dec 27, 2024
83beb64
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 30, 2024
290c6d9
Fix tear down
jsoriano Dec 30, 2024
be6dd46
Fix system tests
jsoriano Dec 30, 2024
6169e15
Get kibana host directly from the config?
jsoriano Dec 30, 2024
2e12e02
Fix stack up with logstash
jsoriano Dec 30, 2024
f8d1cee
Fix logstash with api keys
jsoriano Dec 30, 2024
9a24380
Better idempotence
jsoriano Dec 30, 2024
c4822eb
Remove unused variable
jsoriano Dec 30, 2024
7295a2e
Revert change in initialization of kibana host
jsoriano Dec 30, 2024
0ec34f2
Implement status for environment provider
jsoriano Dec 31, 2024
5f000c5
Try to support local Fleet Server for remote stacks
jsoriano Jan 2, 2025
0a188b4
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 2, 2025
184209e
Fix certificates on agent deployer
jsoriano Jan 3, 2025
d4d32ac
Fix fleet status when fleet server is locally managed
jsoriano Jan 3, 2025
038549c
Reuse existing fleet server hosts
jsoriano Jan 3, 2025
91f2b2d
Add options for API key in clients
jsoriano Jan 3, 2025
b854ca9
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 3, 2025
0d1a1b2
Merge branch 'api-key-clients' into api-key-support
jsoriano Jan 3, 2025
74f2049
Add host.docker.internal to the local services
jsoriano Jan 3, 2025
bbbc671
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 7, 2025
0095a32
Polish status
jsoriano Jan 7, 2025
f60e15d
Add output id to stack config
jsoriano Jan 7, 2025
0c407a0
Fix error formatting value
jsoriano Jan 7, 2025
f53325d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 8, 2025
dcc5e0b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 13, 2025
c65452b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 14, 2025
ffeb24c
Remove unused API keys
jsoriano Jan 15, 2025
1079df7
Fix issues after merge
jsoriano Jan 15, 2025
699623e
Fix kubernetes agent deployer
jsoriano Jan 17, 2025
699cb0f
Add tech preview warning
jsoriano Jan 17, 2025
52ec637
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 17, 2025
aa71071
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 20, 2025
d728838
Pass context to call to get enrollment tokens
jsoriano Jan 20, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Remove unused API keys
  • Loading branch information
jsoriano committed Jan 15, 2025
commit ffeb24ce77e1a6780123f5e1dc92c5ae130e3938
1 change: 0 additions & 1 deletion internal/serverless/project.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ type Project struct {
Region string `json:"region_id"`

Credentials struct {
APIKey string `json:"apiKey,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
} `json:"credentials"`
Expand Down
153 changes: 136 additions & 17 deletions internal/stack/serverless.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,11 @@ import (
"errors"
"fmt"
"slices"
"strings"
"time"

"github.com/elastic/elastic-package/internal/compose"
"github.com/elastic/elastic-package/internal/docker"
"github.com/elastic/elastic-package/internal/elasticsearch"
"github.com/elastic/elastic-package/internal/kibana"
"github.com/elastic/elastic-package/internal/logger"
Expand Down Expand Up @@ -76,7 +79,6 @@ func (sp *serverlessProvider) createProject(ctx context.Context, settings projec
}
config.ElasticsearchHost = project.Endpoints.Elasticsearch
config.KibanaHost = project.Endpoints.Kibana
config.ElasticsearchAPIKey = project.Credentials.APIKey
config.ElasticsearchUsername = project.Credentials.Username
config.ElasticsearchPassword = project.Credentials.Password

Expand Down Expand Up @@ -288,10 +290,7 @@ func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error
}

logger.Infof("Starting local services")
localServices := &localServicesManager{
profile: sp.profile,
}
err = localServices.start(ctx, options, config)
err = sp.startLocalServices(ctx, options, config)
if err != nil {
return fmt.Errorf("failed to start local services: %w", err)
}
Expand All @@ -308,6 +307,55 @@ func (sp *serverlessProvider) BootUp(ctx context.Context, options Options) error
return nil
}

// composeProjectName returns the docker compose project name used for the
// local services of this profile.
func (sp *serverlessProvider) composeProjectName() string {
	return DockerComposeProjectName(sp.profile)
}

// localServicesComposeProject builds the compose project for the local
// services from the profile's stack compose file.
func (sp *serverlessProvider) localServicesComposeProject() (*compose.Project, error) {
	return compose.NewProject(sp.composeProjectName(), sp.profile.Path(ProfileStackPath, ComposeFile))
}

// startLocalServices renders the serverless compose resources for the profile
// and brings the local services up with docker compose, building their images
// first. In daemon mode, services are started detached ("-d").
func (sp *serverlessProvider) startLocalServices(ctx context.Context, options Options, config Config) error {
	err := applyServerlessResources(sp.profile, options.StackVersion, config)
	if err != nil {
		return fmt.Errorf("could not initialize compose files for local services: %w", err)
	}

	project, err := sp.localServicesComposeProject()
	if err != nil {
		// Wrap the underlying error instead of dropping it.
		return fmt.Errorf("could not initialize local services compose project: %w", err)
	}

	opts := compose.CommandOptions{
		ExtraArgs: []string{},
	}
	err = project.Build(ctx, opts)
	if err != nil {
		return fmt.Errorf("failed to build images for local services: %w", err)
	}

	if options.DaemonMode {
		opts.ExtraArgs = append(opts.ExtraArgs, "-d")
	}
	if err := project.Up(ctx, opts); err != nil {
		// At least starting on 8.6.0, fleet-server may be reconfigured or
		// restarted after being healthy. If elastic-agent tries to enroll at
		// this moment, it fails immediately, stopping and making
		// `docker-compose up` fail too.
		// As a workaround, give docker-compose another chance if only
		// elastic-agent failed and the context was not canceled.
		if !onlyElasticAgentFailed(ctx, options) || errors.Is(err, context.Canceled) {
			// Don't silently swallow failures of other services.
			return fmt.Errorf("failed to start local services: %w", err)
		}
		fmt.Println("Elastic Agent failed to start, trying again.")
		if err := project.Up(ctx, opts); err != nil {
			return fmt.Errorf("failed to start local services: %w", err)
		}
	}

	return nil
}

func (sp *serverlessProvider) TearDown(ctx context.Context, options Options) error {
config, err := LoadConfig(sp.profile)
if err != nil {
Expand All @@ -316,20 +364,13 @@ func (sp *serverlessProvider) TearDown(ctx context.Context, options Options) err

var errs error

localServices := &localServicesManager{
profile: sp.profile,
}
err = localServices.destroy(ctx)
err = sp.destroyLocalServices(ctx)
if err != nil {
logger.Errorf("failed to destroy local services: %v", err)
errs = fmt.Errorf("failed to destroy local services: %w", err)
}

project, err := sp.currentProject(ctx, config)
if errors.Is(err, serverless.ErrProjectNotExist) {
logger.Debug("Project does not exist")
return nil
}
if err != nil {
return fmt.Errorf("failed to find current project: %w", err)
}
Expand All @@ -347,6 +388,24 @@ func (sp *serverlessProvider) TearDown(ctx context.Context, options Options) err
return errs
}

// destroyLocalServices tears down the local services compose project,
// removing associated volumes and orphan containers.
func (sp *serverlessProvider) destroyLocalServices(ctx context.Context) error {
	project, err := sp.localServicesComposeProject()
	if err != nil {
		// Wrap the underlying error instead of dropping it.
		return fmt.Errorf("could not initialize local services compose project: %w", err)
	}

	opts := compose.CommandOptions{
		// Remove associated volumes.
		ExtraArgs: []string{"--volumes", "--remove-orphans"},
	}
	err = project.Down(ctx, opts)
	if err != nil {
		return fmt.Errorf("failed to destroy local services: %w", err)
	}

	return nil
}

// Update is not supported by the serverless provider.
func (sp *serverlessProvider) Update(ctx context.Context, options Options) error {
	return errors.New("not implemented")
}
Expand Down Expand Up @@ -393,10 +452,7 @@ func (sp *serverlessProvider) Status(ctx context.Context, options Options) ([]Se
})
}

localServices := &localServicesManager{
profile: sp.profile,
}
agentStatus, err := localServices.status()
agentStatus, err := sp.localAgentStatus()
if err != nil {
return nil, fmt.Errorf("failed to get local agent status: %w", err)
}
Expand All @@ -405,3 +461,66 @@ func (sp *serverlessProvider) Status(ctx context.Context, options Options) ([]Se

return serviceStatus, nil
}

// localAgentStatus collects the status of the containers belonging to this
// profile's local services compose project.
func (sp *serverlessProvider) localAgentStatus() ([]ServiceStatus, error) {
	var statuses []ServiceStatus
	collect := func(desc docker.ContainerDescription) error {
		status, err := newServiceStatus(&desc)
		if err != nil {
			return err
		}
		statuses = append(statuses, *status)
		return nil
	}

	if err := runOnLocalServices(sp.composeProjectName(), collect); err != nil {
		return nil, err
	}

	return statuses, nil
}

// localServiceNames returns the compose service names of the containers
// belonging to the given compose project.
func localServiceNames(project string) ([]string, error) {
	// Keep a non-nil slice so callers always get an empty list, never nil.
	names := []string{}
	collect := func(desc docker.ContainerDescription) error {
		names = append(names, desc.Config.Labels.ComposeService)
		return nil
	}

	if err := runOnLocalServices(project, collect); err != nil {
		return nil, err
	}

	return names, nil
}

// runOnLocalServices invokes serviceFunc for each container of the given
// compose project, skipping services whose name carries the ready-services
// suffix. It stops at the first error returned by serviceFunc.
func runOnLocalServices(project string, serviceFunc func(docker.ContainerDescription) error) error {
	// Query docker directly to avoid loading environment variables
	// (e.g. STACK_VERSION_VARIANT) and profiles.
	containerIDs, err := docker.ContainerIDsWithLabel(projectLabelDockerCompose, project)
	if err != nil {
		return err
	}
	if len(containerIDs) == 0 {
		return nil
	}

	descriptions, err := docker.InspectContainers(containerIDs...)
	if err != nil {
		return err
	}

	for _, description := range descriptions {
		name := description.Config.Labels.ComposeService
		if strings.HasSuffix(name, readyServicesSuffix) {
			continue
		}
		if err := serviceFunc(description); err != nil {
			return err
		}
	}
	return nil
}